From e8c0d1f19b2736efb83c67a247e0022da5eaa7bb Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Wed, 4 Aug 2021 01:44:57 +0000
Subject: [PATCH] Altair networking (#2300)

## Issue Addressed

Resolves #2278

## Proposed Changes

Implements the networking components for the Altair hard fork:
https://github.com/ethereum/eth2.0-specs/blob/dev/specs/altair/p2p-interface.md

## Additional Info

This PR acts as the base branch for networking changes and tracks
https://github.com/sigp/lighthouse/pull/2279. Changes to gossip, RPC and
discovery can be made in separate PRs and merged here for ease of review.

Co-authored-by: realbigsean
---
 Cargo.lock | 89 +-
 beacon_node/beacon_chain/src/beacon_chain.rs | 15 +-
 beacon_node/eth2_libp2p/Cargo.toml | 5 +-
 beacon_node/eth2_libp2p/src/behaviour/mod.rs | 146 +--
 beacon_node/eth2_libp2p/src/config.rs | 106 +-
 beacon_node/eth2_libp2p/src/discovery/enr.rs | 54 +-
 beacon_node/eth2_libp2p/src/discovery/mod.rs | 224 +++--
 .../src/discovery/subnet_predicate.rs | 57 +-
 beacon_node/eth2_libp2p/src/lib.rs | 5 +-
 .../eth2_libp2p/src/peer_manager/mod.rs | 105 +-
 .../eth2_libp2p/src/peer_manager/peer_info.rs | 27 +-
 .../eth2_libp2p/src/peer_manager/peerdb.rs | 17 +-
 beacon_node/eth2_libp2p/src/rpc/codec/base.rs | 99 +-
 .../eth2_libp2p/src/rpc/codec/ssz_snappy.rs | 945 +++++++++++++++---
 beacon_node/eth2_libp2p/src/rpc/handler.rs | 20 +-
 beacon_node/eth2_libp2p/src/rpc/methods.rs | 21 +-
 beacon_node/eth2_libp2p/src/rpc/mod.rs | 11 +-
 beacon_node/eth2_libp2p/src/rpc/outbound.rs | 45 +-
 beacon_node/eth2_libp2p/src/rpc/protocol.rs | 116 ++-
 beacon_node/eth2_libp2p/src/service.rs | 56 +-
 beacon_node/eth2_libp2p/src/types/mod.rs | 8 +-
 beacon_node/eth2_libp2p/src/types/pubsub.rs | 63 +-
 beacon_node/eth2_libp2p/src/types/subnet.rs | 23 +-
 beacon_node/eth2_libp2p/src/types/topics.rs | 112 ++-
 beacon_node/eth2_libp2p/tests/common/mod.rs | 10 +-
 beacon_node/eth2_libp2p/tests/rpc_tests.rs | 30 +-
 beacon_node/http_api/src/lib.rs | 20 +-
 beacon_node/http_api/tests/tests.rs | 19 +-
 .../src/attestation_service/tests/mod.rs | 429 --------
 .../network/src/beacon_processor/mod.rs | 117 ++-
 .../network/src/beacon_processor/tests.rs | 15 +-
 .../beacon_processor/worker/gossip_methods.rs | 370 ++++++-
 beacon_node/network/src/lib.rs | 2 +-
 beacon_node/network/src/metrics.rs | 205 +++-
 beacon_node/network/src/router/mod.rs | 25 +
 beacon_node/network/src/router/processor.rs | 40 +-
 beacon_node/network/src/service.rs | 252 +++--
 beacon_node/network/src/status.rs | 6 +-
 .../attestation_subnets.rs} | 94 +-
 beacon_node/network/src/subnet_service/mod.rs | 50 +
 .../src/subnet_service/sync_subnets.rs | 350 +++++++
 .../network/src/subnet_service/tests/mod.rs | 573 +++++++++++
 boot_node/src/config.rs | 4 +-
 common/eth2/src/types.rs | 1 +
 consensus/types/src/beacon_block.rs | 221 ++--
 consensus/types/src/chain_spec.rs | 53 +-
 consensus/types/src/eth_spec.rs | 4 +
 consensus/types/src/fork_context.rs | 91 ++
 consensus/types/src/lib.rs | 4 +
 .../types/src/sync_committee_subscription.rs | 15 +
 consensus/types/src/sync_subnet_id.rs | 23 +
 51 files changed, 4038 insertions(+), 1354 deletions(-)
 delete mode 100644 beacon_node/network/src/attestation_service/tests/mod.rs
 rename beacon_node/network/src/{attestation_service/mod.rs => subnet_service/attestation_subnets.rs} (91%)
 create mode 100644 beacon_node/network/src/subnet_service/mod.rs
 create mode 100644 beacon_node/network/src/subnet_service/sync_subnets.rs
 create mode 100644 beacon_node/network/src/subnet_service/tests/mod.rs
 create mode 100644
consensus/types/src/fork_context.rs create mode 100644 consensus/types/src/sync_committee_subscription.rs diff --git a/Cargo.lock b/Cargo.lock index 22c3cf62d..9b7a594eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1950,6 +1950,7 @@ dependencies = [ "smallvec", "snap", "strum", + "superstruct", "task_executor", "tempfile", "tiny-keccak 2.0.2", @@ -3325,13 +3326,13 @@ dependencies = [ [[package]] name = "libp2p" version = "0.39.1" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "atomic", "bytes 1.0.1", "futures", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -3397,7 +3398,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asn1_der", "bs58", @@ -3430,21 +3431,21 @@ dependencies = [ [[package]] name = "libp2p-deflate" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "flate2", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", ] [[package]] name = "libp2p-dns" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-std-resolver", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "smallvec", "trust-dns-resolver", @@ -3453,12 +3454,12 @@ dependencies = [ [[package]] name = "libp2p-floodsub" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "cuckoofilter", "fnv", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3470,7 +3471,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.32.0" -source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -3479,7 +3480,7 @@ dependencies = [ "fnv", "futures", "hex_fmt", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3495,10 +3496,10 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3510,7 +3511,7 @@ dependencies = [ [[package]] name = "libp2p-kad" version = "0.31.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec", @@ -3518,7 +3519,7 @@ dependencies = [ "either", "fnv", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3535,7 +3536,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.31.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-io", "data-encoding", @@ -3543,7 +3544,7 @@ dependencies = [ "futures", "if-watch", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "rand 0.8.4", @@ -3555,12 +3556,12 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "nohash-hasher", "parking_lot", @@ -3572,13 +3573,13 @@ dependencies = [ [[package]] name = 
"libp2p-noise" version = "0.32.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "prost", "prost-build", @@ -3593,10 +3594,10 @@ dependencies = [ [[package]] name = "libp2p-ping" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "rand 0.7.3", @@ -3607,12 +3608,12 @@ dependencies = [ [[package]] name = "libp2p-plaintext" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "prost", "prost-build", @@ -3623,7 +3624,7 @@ dependencies = [ [[package]] name = "libp2p-pnet" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", "log", @@ -3636,13 +3637,13 @@ dependencies = [ [[package]] name = "libp2p-relay" version = "0.3.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", "futures-timer", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "pin-project 1.0.7", @@ -3658,12 +3659,12 @@ dependencies = [ [[package]] name = "libp2p-request-response" version = "0.12.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-trait", "bytes 1.0.1", "futures", - 
"libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "lru", @@ -3677,11 +3678,11 @@ dependencies = [ [[package]] name = "libp2p-swarm" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "rand 0.7.3", "smallvec", @@ -3692,7 +3693,7 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "quote", "syn", @@ -3701,7 +3702,7 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-io", "futures", @@ -3710,7 +3711,7 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "socket2 0.4.0", "tokio 1.8.1", @@ -3719,22 +3720,22 @@ dependencies = [ [[package]] name = "libp2p-uds" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-std", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", ] [[package]] name = "libp2p-wasm-ext" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", "js-sys", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", @@ -3743,12 +3744,12 @@ dependencies = [ [[package]] name = "libp2p-websocket" version = "0.30.0" -source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "quicksink", "rw-stream-sink", @@ -3760,10 +3761,10 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.33.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "parking_lot", "thiserror", "yamux", @@ -4252,7 +4253,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.10.3" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", "futures", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index aeb30bf2e..b8c3f4435 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3324,14 +3324,21 @@ impl BeaconChain { // therefore use the genesis slot. let slot = self.slot().unwrap_or(self.spec.genesis_slot); - self.spec.enr_fork_id(slot, self.genesis_validators_root) + self.spec + .enr_fork_id::(slot, self.genesis_validators_root) } - /// Calculates the `Duration` to the next fork, if one exists. - pub fn duration_to_next_fork(&self) -> Option { - let epoch = self.spec.next_fork_epoch()?; + /// Calculates the `Duration` to the next fork if it exists and returns it + /// with it's corresponding `ForkName`. + pub fn duration_to_next_fork(&self) -> Option<(ForkName, Duration)> { + // If we are unable to read the slot clock we assume that it is prior to genesis and + // therefore use the genesis slot. + let slot = self.slot().unwrap_or(self.spec.genesis_slot); + + let (fork_name, epoch) = self.spec.next_fork_epoch::(slot)?; self.slot_clock .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + .map(|duration| (fork_name, duration)) } pub fn dump_as_dot(&self, output: &mut W) { diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index 9f3e48cfa..3029cb03c 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -40,12 +40,15 @@ rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.20", features = ["derive"] } +superstruct = "0.2.0" [dependencies.libp2p] #version = "0.39.1" #default-features = false +# TODO: Update once https://github.com/libp2p/rust-libp2p/pull/2103 and +# https://github.com/libp2p/rust-libp2p/pull/2137 are merged upstream. 
git = "https://github.com/sigp/rust-libp2p" -rev = "323cae1d08112052740834aa1fb262ae43e6f783" +rev = "75fd53ec5407a58ae1ff600fd1c68ea49079364a" features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] [dev-dependencies] diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index c509da6a3..83161c670 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -1,6 +1,7 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; +use crate::config::gossipsub_config; use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; use crate::peer_manager::{ score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -8,7 +9,7 @@ use crate::peer_manager::{ use crate::rpc::*; use crate::service::METADATA_FILENAME; use crate::types::{ - subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; @@ -42,7 +43,10 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use types::{ChainSpec, EnrForkId, EthSpec, SignedBeaconBlock, Slot, SubnetId}; +use types::{ + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, ChainSpec, EnrForkId, EthSpec, ForkContext, + SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, +}; pub mod gossipsub_scoring_parameters; @@ -157,6 +161,8 @@ pub struct Behaviour { /// Directory where metadata is stored. #[behaviour(ignore)] network_dir: PathBuf, + #[behaviour(ignore)] + fork_context: Arc, /// Gossipsub score parameters. #[behaviour(ignore)] score_settings: PeerScoreSettings, @@ -172,9 +178,10 @@ pub struct Behaviour { impl Behaviour { pub async fn new( local_key: &Keypair, - config: &NetworkConfig, + mut config: NetworkConfig, network_globals: Arc>, log: &slog::Logger, + fork_context: Arc, chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); @@ -191,7 +198,8 @@ impl Behaviour { }; // Build and start the discovery sub-behaviour - let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; + let mut discovery = + Discovery::new(local_key, &config, network_globals.clone(), log).await?; // start searching for peers discovery.discover_peers(); @@ -201,13 +209,19 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = vec![enr_fork_id.fork_digest]; + let possible_fork_digests = fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter(possible_fork_digests, 64), //TODO change this to a constant + filter: Self::create_whitelist_filter( + possible_fork_digests, + chain_spec.attestation_subnet_count, + SYNC_COMMITTEE_SUBNET_COUNT, + ), max_subscribed_topics: 200, //TODO change this to a constant max_subscriptions_per_request: 100, //this is according to the current go implementation }; + config.gs_config = gossipsub_config(fork_context.clone()); + // Build and configure the Gossipsub behaviour let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -247,11 +261,11 @@ impl Behaviour { Ok(Behaviour { // Sub-behaviours gossipsub, - eth2_rpc: RPC::new(log.clone()), + eth2_rpc: RPC::new(fork_context.clone(), log.clone()), discovery, identify: 
Identify::new(identify_config), // Auxiliary fields - peer_manager: PeerManager::new(config, network_globals.clone(), log).await?, + peer_manager: PeerManager::new(&config, network_globals.clone(), log).await?, events: VecDeque::new(), internal_events: VecDeque::new(), network_globals, @@ -260,6 +274,7 @@ impl Behaviour { network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, + fork_context, update_gossipsub_scores, }) } @@ -311,28 +326,20 @@ impl Behaviour { self.unsubscribe(gossip_topic) } - /// Subscribes to a specific subnet id; - pub fn subscribe_to_subnet(&mut self, subnet_id: SubnetId) -> bool { - let topic = GossipTopic::new( - subnet_id.into(), - GossipEncoding::default(), - self.enr_fork_id.fork_digest, - ); - self.subscribe(topic) - } - - /// Un-Subscribes from a specific subnet id; - pub fn unsubscribe_from_subnet(&mut self, subnet_id: SubnetId) -> bool { - let topic = GossipTopic::new( - subnet_id.into(), - GossipEncoding::default(), - self.enr_fork_id.fork_digest, - ); - self.unsubscribe(topic) + /// Unsubscribe from all topics that doesn't have the given fork_digest + pub fn unsubscribe_from_fork_topics_except(&mut self, except: [u8; 4]) { + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + .cloned() + { + self.unsubscribe(topic); + } } /// Subscribes to a gossipsub topic. - fn subscribe(&mut self, topic: GossipTopic) -> bool { + pub fn subscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals .gossipsub_subscriptions @@ -354,7 +361,7 @@ impl Behaviour { } /// Unsubscribe from a gossipsub topic. - fn unsubscribe(&mut self, topic: GossipTopic) -> bool { + pub fn unsubscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals .gossipsub_subscriptions @@ -537,15 +544,15 @@ impl Behaviour { self.discovery.add_enr(enr); } - /// Updates a subnet value to the ENR bitfield. + /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. - pub fn update_enr_subnet(&mut self, subnet_id: SubnetId, value: bool) { + pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS - self.update_metadata(); + self.update_metadata_bitfields(); } /// Attempts to discover new peers for a given subnet. 
The `min_ttl` gives the time at which we @@ -564,20 +571,24 @@ impl Behaviour { self.network_globals .peers .write() - .extend_peers_on_subnet(s.subnet_id, min_ttl); + .extend_peers_on_subnet(&s.subnet, min_ttl); + if let Subnet::SyncCommittee(sync_subnet) = s.subnet { + self.peer_manager_mut() + .add_sync_subnet(sync_subnet, min_ttl); + } } // Already have target number of peers, no need for subnet discovery let peers_on_subnet = self .network_globals .peers .read() - .good_peers_on_subnet(s.subnet_id) + .good_peers_on_subnet(s.subnet) .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { trace!( self.log, "Discovery query ignored"; - "subnet_id" => ?s.subnet_id, + "subnet" => ?s.subnet, "reason" => "Already connected to desired peers", "connected_peers_on_subnet" => peers_on_subnet, "target_subnet_peers" => TARGET_SUBNET_PEERS, @@ -587,7 +598,7 @@ impl Behaviour { // If we connect to the cached peers before the discovery query starts, then we potentially // save a costly discovery query. } else { - self.dial_cached_enrs_in_subnet(s.subnet_id); + self.dial_cached_enrs_in_subnet(s.subnet); true } }) @@ -603,26 +614,6 @@ impl Behaviour { pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { self.discovery.update_eth2_enr(enr_fork_id.clone()); - // unsubscribe from all gossip topics and re-subscribe to their new fork counterparts - let subscribed_topics = self - .network_globals - .gossipsub_subscriptions - .read() - .iter() - .cloned() - .collect::>(); - - // unsubscribe from all topics - for topic in &subscribed_topics { - self.unsubscribe(topic.clone()); - } - - // re-subscribe modifying the fork version - for mut topic in subscribed_topics { - *topic.digest() = enr_fork_id.fork_digest; - self.subscribe(topic); - } - // update the local reference self.enr_fork_id = enr_fork_id; } @@ -630,18 +621,28 @@ impl Behaviour { /* Private internal functions */ /// Updates the current meta data of the node to match the local ENR. - fn update_metadata(&mut self) { + fn update_metadata_bitfields(&mut self) { let local_attnets = self .discovery .local_enr() - .bitfield::() - .expect("Local discovery must have bitfield"); + .attestation_bitfield::() + .expect("Local discovery must have attestation bitfield"); + + let local_syncnets = self + .discovery + .local_enr() + .sync_committee_bitfield::() + .expect("Local discovery must have sync committee bitfield"); { // write lock scope let mut meta_data = self.network_globals.local_metadata.write(); - meta_data.seq_number += 1; - meta_data.attnets = local_attnets; + + *meta_data.seq_number_mut() += 1; + *meta_data.attnets_mut() = local_attnets; + if let Ok(syncnets) = meta_data.syncnets_mut() { + *syncnets = local_syncnets; + } } // Save the updated metadata to disk save_metadata_to_disk( @@ -654,7 +655,7 @@ impl Behaviour { /// Sends a Ping request to the peer. fn ping(&mut self, id: RequestId, peer_id: PeerId) { let ping = crate::rpc::Ping { - data: self.network_globals.local_metadata.read().seq_number, + data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => %peer_id); @@ -665,7 +666,7 @@ impl Behaviour { /// Sends a Pong response to the peer. 
fn pong(&mut self, id: PeerRequestId, peer_id: PeerId) { let ping = crate::rpc::Ping { - data: self.network_globals.local_metadata.read().seq_number, + data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); @@ -724,8 +725,8 @@ impl Behaviour { /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. - fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) { - let predicate = subnet_predicate::(vec![subnet_id], &self.log); + fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { + let predicate = subnet_predicate::(vec![subnet], &self.log); let peers_to_dial: Vec = self .discovery .cached_enrs() @@ -752,6 +753,7 @@ impl Behaviour { fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, + sync_committee_subnet_count: u64, ) -> WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -767,9 +769,13 @@ impl Behaviour { add(VoluntaryExit); add(ProposerSlashing); add(AttesterSlashing); + add(SignedContributionAndProof); for id in 0..attestation_subnet_count { add(Attestation(SubnetId::new(id))); } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } } WhitelistSubscriptionFilter(possible_hashes) } @@ -792,9 +798,9 @@ impl NetworkBehaviourEventProcess for Behaviour< } => { // Note: We are keeping track here of the peer that sent us the message, not the // peer that originally published the message. - match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data) { + match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data, &self.fork_context) { Err(e) => { - debug!(self.log, "Could not decode gossipsub message"; "error" => e); + debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); //reject the message if let Err(e) = self.gossipsub.report_message_validation_result( &id, @@ -816,12 +822,12 @@ impl NetworkBehaviourEventProcess for Behaviour< } } GossipsubEvent::Subscribed { peer_id, topic } => { - if let Some(subnet_id) = subnet_id_from_topic_hash(&topic) { + if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.peer_manager.add_subscription(&peer_id, subnet_id); } } GossipsubEvent::Unsubscribed { peer_id, topic } => { - if let Some(subnet_id) = subnet_id_from_topic_hash(&topic) { + if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.peer_manager.remove_subscription(&peer_id, subnet_id); } } @@ -1089,6 +1095,10 @@ impl Behaviour { // Peer manager has requested a discovery query for more peers. self.discovery.discover_peers(); } + PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { + // Peer manager has requested a subnet discovery query for more peers. 
+ self.discover_subnet_peers(subnets_to_discover); + } PeerManagerEvent::Ping(peer_id) => { // send a ping request to this peer self.ping(RequestId::Behaviour, peer_id); diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 5add5fdf9..3face7b98 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -12,7 +12,9 @@ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; +use types::{ForkContext, ForkName}; /// The maximum transmit size of gossip messages in bytes. pub const GOSSIP_MAX_SIZE: usize = 1_048_576; @@ -109,47 +111,9 @@ impl Default for Config { .join(DEFAULT_BEACON_NODE_DIR) .join(DEFAULT_NETWORK_DIR); - // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(data) for content addressing - let fast_gossip_message_id = |message: &RawGossipsubMessage| { - FastMessageId::from(&Sha256::digest(&message.data)[..8]) - }; - - fn prefix(prefix: [u8; 4], data: &[u8]) -> Vec { - let mut vec = Vec::with_capacity(prefix.len() + data.len()); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(data); - vec - } - - let gossip_message_id = |message: &GossipsubMessage| { - MessageId::from( - &Sha256::digest(prefix(MESSAGE_DOMAIN_VALID_SNAPPY, &message.data).as_slice()) - [..20], - ) - }; - - // gossipsub configuration - // Note: The topics by default are sent as plain strings. Hashes are an optional - // parameter. + // Note: Using the default config here. Use `gossipsub_config` function for getting + // Lighthouse specific configuration for gossipsub. let gs_config = GossipsubConfigBuilder::default() - .max_transmit_size(GOSSIP_MAX_SIZE) - .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(8) - .mesh_n_low(MESH_N_LOW) - .mesh_n_high(12) - .gossip_lazy(6) - .fanout_ttl(Duration::from_secs(60)) - .history_length(12) - .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large - .history_gossip(3) - .validate_messages() // require validation before propagation - .validation_mode(ValidationMode::Anonymous) - // prevent duplicates for 550 heartbeats(700millis * 550) = 385 secs - .duplicate_cache_time(Duration::from_secs(385)) - .message_id_fn(gossip_message_id) - .fast_message_id_fn(fast_gossip_message_id) - .allow_self_origin(true) .build() .expect("valid gossipsub configuration"); @@ -209,3 +173,65 @@ impl Default for Config { } } } + +/// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
+pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { + // The function used to generate a gossipsub message id + // We use the first 8 bytes of SHA256(data) for content addressing + let fast_gossip_message_id = + |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]); + fn prefix( + prefix: [u8; 4], + message: &GossipsubMessage, + fork_context: Arc, + ) -> Vec { + let topic_bytes = message.topic.as_str().as_bytes(); + match fork_context.current_fork() { + ForkName::Altair => { + let topic_len_bytes = topic_bytes.len().to_le_bytes(); + let mut vec = Vec::with_capacity( + prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), + ); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&topic_len_bytes); + vec.extend_from_slice(topic_bytes); + vec.extend_from_slice(&message.data); + vec + } + ForkName::Base => { + let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&message.data); + vec + } + } + } + + let gossip_message_id = move |message: &GossipsubMessage| { + MessageId::from( + &Sha256::digest( + prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), + )[..20], + ) + }; + GossipsubConfigBuilder::default() + .max_transmit_size(GOSSIP_MAX_SIZE) + .heartbeat_interval(Duration::from_millis(700)) + .mesh_n(8) + .mesh_n_low(MESH_N_LOW) + .mesh_n_high(12) + .gossip_lazy(6) + .fanout_ttl(Duration::from_secs(60)) + .history_length(12) + .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large + .history_gossip(3) + .validate_messages() // require validation before propagation + .validation_mode(ValidationMode::Anonymous) + // prevent duplicates for 550 heartbeats(700millis * 550) = 385 secs + .duplicate_cache_time(Duration::from_secs(385)) + .message_id_fn(gossip_message_id) + .fast_message_id_fn(fast_gossip_message_id) + .allow_self_origin(true) + .build() + .expect("valid gossipsub configuration") +} diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index a8f058636..3f2ae759b 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -4,7 +4,7 @@ pub use discv5::enr::{self, CombinedKey, EnrBuilder}; use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; -use crate::types::{Enr, EnrBitfield}; +use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use discv5::enr::EnrKey; use libp2p::core::identity::Keypair; @@ -19,25 +19,47 @@ use types::{EnrForkId, EthSpec}; /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; -/// The ENR field specifying the subnet bitfield. -pub const BITFIELD_ENR_KEY: &str = "attnets"; +/// The ENR field specifying the attestation subnet bitfield. +pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; +/// The ENR field specifying the sync committee subnet bitfield. +pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { - /// The subnet bitfield associated with the ENR. - fn bitfield(&self) -> Result, &'static str>; + /// The attestation subnet bitfield associated with the ENR. + fn attestation_bitfield( + &self, + ) -> Result, &'static str>; + + /// The sync committee subnet bitfield associated with the ENR. 
+ fn sync_committee_bitfield( + &self, + ) -> Result, &'static str>; fn eth2(&self) -> Result; } impl Eth2Enr for Enr { - fn bitfield(&self) -> Result, &'static str> { + fn attestation_bitfield( + &self, + ) -> Result, &'static str> { let bitfield_bytes = self - .get(BITFIELD_ENR_KEY) - .ok_or("ENR bitfield non-existent")?; + .get(ATTESTATION_BITFIELD_ENR_KEY) + .ok_or("ENR attestation bitfield non-existent")?; BitVector::::from_ssz_bytes(bitfield_bytes) - .map_err(|_| "Could not decode the ENR SSZ bitfield") + .map_err(|_| "Could not decode the ENR attnets bitfield") + } + + fn sync_committee_bitfield( + &self, + ) -> Result, &'static str> { + let bitfield_bytes = self + .get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + .ok_or("ENR sync committee bitfield non-existent")?; + + BitVector::::from_ssz_bytes(bitfield_bytes) + .map_err(|_| "Could not decode the ENR syncnets bitfield") } fn eth2(&self) -> Result { @@ -151,7 +173,12 @@ pub fn build_enr( // set the "attnets" field on our ENR let bitfield = BitVector::::new(); - builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + + // set the "syncnets" field on our ENR + let bitfield = BitVector::::new(); + + builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); builder .build(enr_key) @@ -169,9 +196,10 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) - // we need the BITFIELD_ENR_KEY key to match, otherwise we use a new ENR. This will likely only - // be true for non-validating nodes - && local_enr.get(BITFIELD_ENR_KEY) == disk_enr.get(BITFIELD_ENR_KEY) + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, + // otherwise we use a new ENR. 
This will likely only be true for non-validating nodes + && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) + && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index a5159eff2..3866e4d47 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -8,31 +8,28 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder use crate::{config, metrics}; -use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery}; +use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr, }; -use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; +pub use libp2p::core::identity::{Keypair, PublicKey}; + +use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; pub use libp2p::{ - core::{ - connection::ConnectionId, - identity::{Keypair, PublicKey}, - ConnectedPoint, Multiaddr, PeerId, - }, + core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters, SubstreamProtocol, }, }; use lru::LruCache; -use slog::{crit, debug, error, info, warn}; -use ssz::{Decode, Encode}; -use ssz_types::BitVector; +use slog::{crit, debug, error, info, trace, warn}; +use ssz::Encode; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -43,7 +40,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::mpsc; -use types::{EnrForkId, EthSpec, SubnetId}; +use types::{EnrForkId, EthSpec}; mod subnet_predicate; pub use subnet_predicate::subnet_predicate; @@ -77,13 +74,26 @@ pub enum DiscoveryEvent { SocketUpdated(SocketAddr), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] struct SubnetQuery { - subnet_id: SubnetId, + subnet: Subnet, min_ttl: Option, retries: usize, } +impl std::fmt::Debug for SubnetQuery { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let min_ttl_secs = self + .min_ttl + .map(|ttl| ttl.saturating_duration_since(Instant::now()).as_secs()); + f.debug_struct("SubnetQuery") + .field("subnet", &self.subnet) + .field("min_ttl_secs", &min_ttl_secs) + .field("retries", &self.retries) + .finish() + } +} + #[derive(Debug, Clone, PartialEq)] enum QueryType { /// We are searching for subnet peers. 
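The hunks above migrate discovery from bare `SubnetId`s to the fork-aware `Subnet` type defined in the new `beacon_node/eth2_libp2p/src/types/subnet.rs` (not shown in this excerpt). A minimal sketch consistent with the usage sites in this patch — the derives and doc text are assumptions:

```rust
use std::time::Instant;
use types::{SubnetId, SyncSubnetId};

/// Sketch of the two subnet flavours used by discovery in this patch;
/// the variants match the call sites, the derives are assumed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Subnet {
    /// Short-lived attestation subnet, advertised in the `attnets` ENR field.
    Attestation(SubnetId),
    /// Long-lived sync committee subnet, advertised in the `syncnets` ENR field.
    SyncCommittee(SyncSubnetId),
}

/// A request for discovery to find peers on `subnet`, kept alive until `min_ttl`.
pub struct SubnetDiscovery {
    pub subnet: Subnet,
    pub min_ttl: Option<Instant>,
}
```

Carrying the variant through `SubnetQuery` is what lets a single grouped discovery query (and the `subnet_predicate`) match attestation and sync committee subnets against the separate `attnets`/`syncnets` ENR fields.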
@@ -335,13 +345,13 @@ impl Discovery { if !self.started { return; } - debug!( + trace!( self.log, "Making discovery query for subnets"; - "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet_id).collect::>() + "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() ); for subnet in subnets_to_discover { - self.add_subnet_query(subnet.subnet_id, subnet.min_ttl, 0); + self.add_subnet_query(subnet.subnet, subnet.min_ttl, 0); } } @@ -426,42 +436,84 @@ impl Discovery { Ok(()) } - /// Adds/Removes a subnet from the ENR Bitfield - pub fn update_enr_bitfield(&mut self, subnet_id: SubnetId, value: bool) -> Result<(), String> { - let id = *subnet_id as usize; - + /// Adds/Removes a subnet from the ENR attnets/syncnets Bitfield + pub fn update_enr_bitfield(&mut self, subnet: Subnet, value: bool) -> Result<(), String> { let local_enr = self.discv5.local_enr(); - let mut current_bitfield = local_enr.bitfield::()?; - if id >= current_bitfield.len() { - return Err(format!( - "Subnet id: {} is outside the ENR bitfield length: {}", - id, - current_bitfield.len() - )); + match subnet { + Subnet::Attestation(id) => { + let id = *id as usize; + let mut current_bitfield = local_enr.attestation_bitfield::()?; + if id >= current_bitfield.len() { + return Err(format!( + "Subnet id: {} is outside the ENR bitfield length: {}", + id, + current_bitfield.len() + )); + } + + if current_bitfield + .get(id) + .map_err(|_| String::from("Subnet ID out of bounds"))? + == value + { + return Err(format!( + "Subnet id: {} already in the local ENR already has value: {}", + id, value + )); + } + + // set the subnet bitfield in the ENR + current_bitfield.set(id, value).map_err(|_| { + String::from("Subnet ID out of bounds, could not set subnet ID") + })?; + + // insert the bitfield into the ENR record + self.discv5 + .enr_insert( + ATTESTATION_BITFIELD_ENR_KEY, + ¤t_bitfield.as_ssz_bytes(), + ) + .map_err(|e| format!("{:?}", e))?; + } + Subnet::SyncCommittee(id) => { + let id = *id as usize; + let mut current_bitfield = local_enr.sync_committee_bitfield::()?; + + if id >= current_bitfield.len() { + return Err(format!( + "Subnet id: {} is outside the ENR bitfield length: {}", + id, + current_bitfield.len() + )); + } + + if current_bitfield + .get(id) + .map_err(|_| String::from("Subnet ID out of bounds"))? + == value + { + return Err(format!( + "Subnet id: {} already in the local ENR already has value: {}", + id, value + )); + } + + // set the subnet bitfield in the ENR + current_bitfield.set(id, value).map_err(|_| { + String::from("Subnet ID out of bounds, could not set subnet ID") + })?; + + // insert the bitfield into the ENR record + self.discv5 + .enr_insert( + SYNC_COMMITTEE_BITFIELD_ENR_KEY, + ¤t_bitfield.as_ssz_bytes(), + ) + .map_err(|e| format!("{:?}", e))?; + } } - if current_bitfield - .get(id) - .map_err(|_| String::from("Subnet ID out of bounds"))? - == value - { - return Err(format!( - "Subnet id: {} already in the local ENR already has value: {}", - id, value - )); - } - - // set the subnet bitfield in the ENR - current_bitfield - .set(id, value) - .map_err(|_| String::from("Subnet ID out of bounds, could not set subnet ID"))?; - - // insert the bitfield into the ENR record - self.discv5 - .enr_insert(BITFIELD_ENR_KEY, ¤t_bitfield.as_ssz_bytes()) - .map_err(|e| format!("{:?}", e))?; - // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); @@ -547,7 +599,7 @@ impl Discovery { /// Adds a subnet query if one doesn't exist. 
If a subnet query already exists, this /// updates the min_ttl field. - fn add_subnet_query(&mut self, subnet_id: SubnetId, min_ttl: Option, retries: usize) { + fn add_subnet_query(&mut self, subnet: Subnet, min_ttl: Option, retries: usize) { // remove the entry and complete the query if greater than the maximum search count if retries > MAX_DISCOVERY_RETRY { debug!( @@ -562,7 +614,7 @@ impl Discovery { let mut found = false; for query in self.queued_queries.iter_mut() { if let QueryType::Subnet(ref mut subnet_query) = query { - if subnet_query.subnet_id == subnet_id { + if subnet_query.subnet == subnet { if subnet_query.min_ttl < min_ttl { subnet_query.min_ttl = min_ttl; } @@ -577,12 +629,12 @@ impl Discovery { if !found { // Set up the query and add it to the queue let query = QueryType::Subnet(SubnetQuery { - subnet_id, + subnet, min_ttl, retries, }); // update the metrics and insert into the queue. - debug!(self.log, "Queuing subnet query"; "subnet" => *subnet_id, "retries" => retries); + trace!(self.log, "Queuing subnet query"; "subnet" => ?subnet, "retries" => retries); self.queued_queries.push_back(query); metrics::set_gauge(&metrics::DISCOVERY_QUEUE, self.queued_queries.len() as i64); } @@ -636,11 +688,6 @@ impl Discovery { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue let grouped_queries: Vec = subnet_queries.drain(..).collect(); - debug!( - self.log, - "Starting grouped subnet query"; - "subnets" => ?grouped_queries.iter().map(|q| q.subnet_id).collect::>(), - ); self.start_subnet_query(grouped_queries); processed = true; } @@ -661,7 +708,7 @@ impl Discovery { /// Runs a discovery request for a given group of subnets. fn start_subnet_query(&mut self, subnet_queries: Vec) { - let mut filtered_subnet_ids: Vec = Vec::new(); + let mut filtered_subnets: Vec = Vec::new(); // find subnet queries that are still necessary let filtered_subnet_queries: Vec = subnet_queries @@ -672,7 +719,7 @@ impl Discovery { .network_globals .peers .read() - .good_peers_on_subnet(subnet_query.subnet_id) + .good_peers_on_subnet(subnet_query.subnet) .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { @@ -685,16 +732,13 @@ impl Discovery { } let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; - debug!(self.log, "Discovery query started for subnet"; - "subnet_id" => *subnet_query.subnet_id, + trace!(self.log, "Discovery query started for subnet"; + "subnet_query" => ?subnet_query, "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, "peers_to_find" => target_peers, - "attempt" => subnet_query.retries, - "min_ttl" => ?subnet_query.min_ttl, ); - filtered_subnet_ids.push(subnet_query.subnet_id); + filtered_subnets.push(subnet_query.subnet); true }) .collect(); @@ -702,8 +746,13 @@ impl Discovery { // Only start a discovery query if we have a subnet to look for. 
if !filtered_subnet_queries.is_empty() { // build the subnet predicate as a combination of the eth2_fork_predicate and the subnet predicate - let subnet_predicate = subnet_predicate::(filtered_subnet_ids, &self.log); + let subnet_predicate = subnet_predicate::(filtered_subnets, &self.log); + debug!( + self.log, + "Starting grouped subnet query"; + "subnets" => ?filtered_subnet_queries, + ); self.start_query( GroupedQueryType::Subnet(filtered_subnet_queries), TARGET_PEERS_FOR_GROUPED_QUERY, @@ -798,17 +847,13 @@ impl Discovery { } } GroupedQueryType::Subnet(queries) => { - let subnets_searched_for: Vec = - queries.iter().map(|query| query.subnet_id).collect(); + let subnets_searched_for: Vec = + queries.iter().map(|query| query.subnet).collect(); match query_result.1 { Ok(r) if r.is_empty() => { debug!(self.log, "Grouped subnet discovery query yielded no results."; "subnets_searched_for" => ?subnets_searched_for); queries.iter().for_each(|query| { - self.add_subnet_query( - query.subnet_id, - query.min_ttl, - query.retries + 1, - ); + self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); }) } Ok(r) => { @@ -824,15 +869,11 @@ impl Discovery { // Map each subnet query's min_ttl to the set of ENR's returned for that subnet. queries.iter().for_each(|query| { // A subnet query has completed. Add back to the queue, incrementing retries. - self.add_subnet_query( - query.subnet_id, - query.min_ttl, - query.retries + 1, - ); + self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); // Check the specific subnet against the enr let subnet_predicate = - subnet_predicate::(vec![query.subnet_id], &self.log); + subnet_predicate::(vec![query.subnet], &self.log); r.iter() .filter(|enr| subnet_predicate(enr)) @@ -1037,11 +1078,11 @@ impl NetworkBehaviour for Discovery { #[cfg(test)] mod tests { use super::*; - use crate::rpc::methods::MetaData; + use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; use slog::{o, Drain}; use std::net::UdpSocket; - use types::MinimalEthSpec; + use types::{BitVector, MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; @@ -1076,10 +1117,11 @@ mod tests { enr, 9000, 9000, - MetaData { + MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), - }, + syncnets: Default::default(), + }), vec![], &log, ); @@ -1093,12 +1135,12 @@ mod tests { let mut discovery = build_discovery().await; let now = Instant::now(); let mut subnet_query = SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: Some(now), retries: 0, }; discovery.add_subnet_query( - subnet_query.subnet_id, + subnet_query.subnet, subnet_query.min_ttl, subnet_query.retries, ); @@ -1109,7 +1151,7 @@ mod tests { // New query should replace old query subnet_query.min_ttl = Some(now + Duration::from_secs(1)); - discovery.add_subnet_query(subnet_query.subnet_id, subnet_query.min_ttl, 1); + discovery.add_subnet_query(subnet_query.subnet, subnet_query.min_ttl, 1); subnet_query.retries += 1; @@ -1122,7 +1164,7 @@ mod tests { // Retries > MAX_DISCOVERY_RETRY must return immediately without adding // anything. 
discovery.add_subnet_query( - subnet_query.subnet_id, + subnet_query.subnet, subnet_query.min_ttl, MAX_DISCOVERY_RETRY + 1, ); @@ -1140,7 +1182,7 @@ mod tests { let now = Instant::now(); let subnet_query = SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: Some(now + Duration::from_secs(10)), retries: 0, }; @@ -1174,7 +1216,7 @@ mod tests { bitfield.set(id, true).unwrap(); } - builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); builder.build(&enr_key).unwrap() } @@ -1187,12 +1229,12 @@ mod tests { let query = GroupedQueryType::Subnet(vec![ SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: instant1, retries: 0, }, SubnetQuery { - subnet_id: SubnetId::new(2), + subnet: Subnet::Attestation(SubnetId::new(2)), min_ttl: instant2, retries: 0, }, diff --git a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs index 0b761eedd..e324532f7 100644 --- a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs +++ b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs @@ -1,11 +1,12 @@ ///! The subnet predicate used for searching for a particular subnet. use super::*; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use slog::trace; use std::ops::Deref; /// Returns the predicate for a given subnet. pub fn subnet_predicate( - subnet_ids: Vec, + subnets: Vec, log: &slog::Logger, ) -> impl Fn(&Enr) -> bool + Send where @@ -14,39 +15,33 @@ where let log_clone = log.clone(); move |enr: &Enr| { - if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) { - let bitfield = match BitVector::::from_ssz_bytes( - bitfield_bytes, - ) { - Ok(v) => v, - Err(e) => { - warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e)); - return false; - } + let attestation_bitfield: EnrAttestationBitfield = + match enr.attestation_bitfield::() { + Ok(b) => b, + Err(_e) => return false, }; - let matches: Vec<&SubnetId> = subnet_ids - .iter() - .filter(|id| bitfield.get(**id.deref() as usize).unwrap_or(false)) - .collect(); + // Pre-fork/fork-boundary enrs may not contain a syncnets field. 
+ // Don't return early here + let sync_committee_bitfield: Result, _> = + enr.sync_committee_bitfield::(); - if matches.is_empty() { - trace!( - log_clone, - "Peer found but not on any of the desired subnets"; - "peer_id" => %enr.peer_id() - ); - return false; - } else { - trace!( - log_clone, - "Peer found on desired subnet(s)"; - "peer_id" => %enr.peer_id(), - "subnets" => ?matches.as_slice() - ); - return true; - } + let predicate = subnets.iter().any(|subnet| match subnet { + Subnet::Attestation(s) => attestation_bitfield + .get(*s.deref() as usize) + .unwrap_or(false), + Subnet::SyncCommittee(s) => sync_committee_bitfield + .as_ref() + .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), + }); + + if !predicate { + trace!( + log_clone, + "Peer found but not on any of the desired subnets"; + "peer_id" => %enr.peer_id() + ); } - false + predicate } } diff --git a/beacon_node/eth2_libp2p/src/lib.rs b/beacon_node/eth2_libp2p/src/lib.rs index 3a582ac71..c04c61616 100644 --- a/beacon_node/eth2_libp2p/src/lib.rs +++ b/beacon_node/eth2_libp2p/src/lib.rs @@ -60,7 +60,10 @@ impl<'de> Deserialize<'de> for PeerIdSerialized { } } -pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery}; +pub use crate::types::{ + error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, + SubnetDiscovery, +}; pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index 26234a93f..34ba564d6 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -1,10 +1,12 @@ //! Implementation of Lighthouse's peer management system. pub use self::peerdb::*; +use crate::discovery::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::types::SyncState; use crate::{error, metrics, Gossipsub}; use crate::{NetworkConfig, NetworkGlobals, PeerId}; +use crate::{Subnet, SubnetDiscovery}; use discv5::Enr; use futures::prelude::*; use futures::Stream; @@ -19,7 +21,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use types::{EthSpec, SubnetId}; +use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -34,7 +36,7 @@ pub use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerConnectionSta pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; use score::{PeerAction, ReportSource, ScoreState}; use std::cmp::Ordering; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::net::IpAddr; /// The time in seconds between re-status's peers. @@ -78,6 +80,11 @@ pub struct PeerManager { target_peers: usize, /// The maximum number of peers we allow (exceptions for subnet peers) max_peers: usize, + /// A collection of sync committee subnets that we need to stay subscribed to. + /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run + /// discovery queries for subnet peers if we disconnect from existing sync + /// committee subnet peers. + sync_committee_subnets: HashMap, /// The heartbeat interval to perform routine maintenance. heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. 
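The `sync_committee_subnets` field above exists because of the "256 epochs" noted in its comment: a sync committee subscription outlives any single discovery result, so dropped peers must be replaced by re-running discovery. A quick sanity check of that lifetime, with mainnet constants hard-coded for illustration (the real code derives durations from `ChainSpec`/`EthSpec`, not literals):

```rust
use std::time::{Duration, Instant};

// Mainnet constants, hard-coded here for illustration only.
const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: u64 = 256;
const SLOTS_PER_EPOCH: u64 = 32;
const SECONDS_PER_SLOT: u64 = 12;

fn main() {
    let period = Duration::from_secs(
        EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT,
    );
    // 256 * 32 * 12 = 98_304 seconds, roughly 27 hours: far longer than a
    // one-epoch attestation duty, hence the periodic re-discovery.
    assert_eq!(period.as_secs(), 98_304);
    let _min_ttl: Instant = Instant::now() + period; // stored per subnet id
}
```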
@@ -108,6 +115,8 @@ pub enum PeerManagerEvent {
    UnBanned(PeerId, Vec<IpAddr>),
    /// Request the behaviour to discover more peers.
    DiscoverPeers,
+    /// Request the behaviour to discover peers on subnets.
+    DiscoverSubnetPeers(Vec<SubnetDiscovery>),
}

impl<TSpec: EthSpec> PeerManager<TSpec> {
@@ -127,6 +136,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
            outbound_ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL_OUTBOUND)),
            status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)),
            target_peers: config.target_peers,
+            sync_committee_subnets: Default::default(),
            max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize,
            heartbeat,
            discovery_enabled: !config.disable_discovery,
@@ -264,16 +274,16 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
    }

    /// Adds a gossipsub subscription to a peer in the peerdb.
-    pub fn add_subscription(&self, peer_id: &PeerId, subnet_id: SubnetId) {
+    pub fn add_subscription(&self, peer_id: &PeerId, subnet: Subnet) {
        if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
-            info.subnets.insert(subnet_id);
+            info.subnets.insert(subnet);
        }
    }

    /// Removes a gossipsub subscription to a peer in the peerdb.
-    pub fn remove_subscription(&self, peer_id: &PeerId, subnet_id: SubnetId) {
+    pub fn remove_subscription(&self, peer_id: &PeerId, subnet: Subnet) {
        if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
-            info.subnets.remove(&subnet_id);
+            info.subnets.remove(&subnet);
        }
    }

@@ -284,6 +294,21 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        }
    }

+    /// Inserts the sync subnet into the list of long-lived sync committee subnets for which we
+    /// need to maintain an adequate number of peers.
+    pub fn add_sync_subnet(&mut self, subnet_id: SyncSubnetId, min_ttl: Instant) {
+        match self.sync_committee_subnets.entry(subnet_id) {
+            Entry::Vacant(_) => {
+                self.sync_committee_subnets.insert(subnet_id, min_ttl);
+            }
+            Entry::Occupied(old) => {
+                if *old.get() < min_ttl {
+                    self.sync_committee_subnets.insert(subnet_id, min_ttl);
+                }
+            }
+        }
+    }
+
    /* Notifications from the Swarm */

    // A peer is being dialed.
@@ -599,9 +624,9 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        // if the sequence number is unknown, request an update of the peer's metadata.
        if let Some(meta_data) = &peer_info.meta_data {
-            if meta_data.seq_number < seq {
+            if *meta_data.seq_number() < seq {
                debug!(self.log, "Requesting new metadata from peer";
-                    "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq);
+                    "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "ping_seq_no" => seq);
                self.events.push(PeerManagerEvent::MetaData(*peer_id));
            }
        } else {
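`add_sync_subnet` above is a keep-the-furthest-deadline update: a new `min_ttl` only replaces the stored one when it is later. A self-contained sketch of the same pattern, with `u64` standing in for `SyncSubnetId` (hypothetical helper, std only):

use std::collections::{hash_map::Entry, HashMap};
use std::time::{Duration, Instant};

/// Record `deadline` for `subnet`, never shortening an existing deadline.
fn extend_subnet_deadline(map: &mut HashMap<u64, Instant>, subnet: u64, deadline: Instant) {
    match map.entry(subnet) {
        Entry::Vacant(entry) => {
            entry.insert(deadline);
        }
        Entry::Occupied(mut entry) => {
            if *entry.get() < deadline {
                entry.insert(deadline);
            }
        }
    }
}

fn main() {
    let mut subnets = HashMap::new();
    let now = Instant::now();
    extend_subnet_deadline(&mut subnets, 0, now + Duration::from_secs(10));
    // An earlier deadline must not overwrite the later one.
    extend_subnet_deadline(&mut subnets, 0, now + Duration::from_secs(5));
    assert_eq!(subnets[&0], now + Duration::from_secs(10));
}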
@@ -623,9 +648,9 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        // if the sequence number is unknown, request an update of the peer's metadata.
        if let Some(meta_data) = &peer_info.meta_data {
-            if meta_data.seq_number < seq {
+            if *meta_data.seq_number() < seq {
                debug!(self.log, "Requesting new metadata from peer";
-                    "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq);
+                    "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "pong_seq_no" => seq);
                self.events.push(PeerManagerEvent::MetaData(*peer_id));
            }
        } else {
@@ -643,19 +668,19 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
    pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
        if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
            if let Some(known_meta_data) = &peer_info.meta_data {
-                if known_meta_data.seq_number < meta_data.seq_number {
+                if *known_meta_data.seq_number() < *meta_data.seq_number() {
                    debug!(self.log, "Updating peer's metadata";
-                        "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
+                        "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number());
                } else {
                    debug!(self.log, "Received old metadata";
-                        "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
+                        "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number());
                    // Updating metadata even in this case to prevent storing
-                    // incorrect `metadata.attnets` for a peer
+                    // incorrect `attnets/syncnets` for a peer
                }
            } else {
                // we have no meta-data for this peer, update
                debug!(self.log, "Obtained peer's metadata";
-                    "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number);
+                    "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number());
            }
            peer_info.meta_data = Some(meta_data);
        } else {
@@ -965,6 +990,46 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
        Ok(())
    }

+    /// Run a discovery query for additional sync committee peers if we fall below
+    /// `TARGET_SUBNET_PEERS`.
+    fn maintain_sync_committee_peers(&mut self) {
+        // Remove expired entries
+        self.sync_committee_subnets
+            .retain(|_, v| *v > Instant::now());
+
+        let subnets_to_discover: Vec<SubnetDiscovery> = self
+            .sync_committee_subnets
+            .iter()
+            .filter_map(|(k, v)| {
+                if self
+                    .network_globals
+                    .peers
+                    .read()
+                    .good_peers_on_subnet(Subnet::SyncCommittee(*k))
+                    .count()
+                    < TARGET_SUBNET_PEERS
+                {
+                    Some(SubnetDiscovery {
+                        subnet: Subnet::SyncCommittee(*k),
+                        min_ttl: Some(*v),
+                    })
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        // request the subnet query from discovery
+        if !subnets_to_discover.is_empty() {
+            debug!(
+                self.log,
+                "Making subnet queries for maintaining sync committee peers";
+                "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::<Vec<_>>()
+            );
+            self.events
+                .push(PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover));
+        }
+    }
+
    /// The Peer manager's heartbeat maintains the peer count and peer reputations.
    ///
    /// It will request discovery queries if the peer count has not reached the desired number of
@@ -989,6 +1054,9 @@
        // Updates peer's scores.
        self.update_peer_scores();

+        // Maintain minimum count for sync committee peers.
+ self.maintain_sync_committee_peers(); + // Keep a list of peers we are disconnecting let mut disconnecting_peers = Vec::new(); @@ -1115,7 +1183,7 @@ mod tests { use super::*; use crate::discovery::enr::build_enr; use crate::discovery::enr_ext::CombinedKeyExt; - use crate::rpc::methods::MetaData; + use crate::rpc::methods::{MetaData, MetaDataV2}; use crate::Enr; use discv5::enr::CombinedKey; use slog::{o, Drain}; @@ -1156,10 +1224,11 @@ mod tests { enr, 9000, 9000, - MetaData { + MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), - }, + syncnets: Default::default(), + }), vec![], &log, ); diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index c9eeae947..717782901 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -1,8 +1,8 @@ use super::client::Client; use super::score::{PeerAction, Score, ScoreState}; use super::PeerSyncStatus; -use crate::rpc::MetaData; use crate::Multiaddr; +use crate::{rpc::MetaData, types::Subnet}; use discv5::Enr; use serde::{ ser::{SerializeStruct, Serializer}, @@ -12,7 +12,7 @@ use std::collections::HashSet; use std::net::{IpAddr, SocketAddr}; use std::time::Instant; use strum::AsRefStr; -use types::{EthSpec, SubnetId}; +use types::EthSpec; use PeerConnectionStatus::*; /// Information about a given connected peer. @@ -40,7 +40,7 @@ pub struct PeerInfo { /// connection. pub meta_data: Option>, /// Subnets the peer is connected to. - pub subnets: HashSet, + pub subnets: HashSet, /// The time we would like to retain this peer. After this time, the peer is no longer /// necessary. #[serde(skip)] @@ -84,17 +84,26 @@ impl PeerInfo { } } - /// Returns if the peer is subscribed to a given `SubnetId` from the metadata attnets field. - pub fn on_subnet_metadata(&self, subnet_id: SubnetId) -> bool { + /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. + pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { if let Some(meta_data) = &self.meta_data { - return meta_data.attnets.get(*subnet_id as usize).unwrap_or(false); + match subnet { + Subnet::Attestation(id) => { + return meta_data.attnets().get(**id as usize).unwrap_or(false) + } + Subnet::SyncCommittee(id) => { + return meta_data + .syncnets() + .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) + } + } } false } - /// Returns if the peer is subscribed to a given `SubnetId` from the gossipsub subscriptions. - pub fn on_subnet_gossipsub(&self, subnet_id: SubnetId) -> bool { - self.subnets.contains(&subnet_id) + /// Returns if the peer is subscribed to a given `Subnet` from the gossipsub subscriptions. + pub fn on_subnet_gossipsub(&self, subnet: &Subnet) -> bool { + self.subnets.contains(subnet) } /// Returns the seen IP addresses of the peer. 
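One point worth keeping in mind for the `peerdb` changes below: a peer only counts as a long-lived subnet peer when both signals agree, i.e. its METADATA bitfield advertises the subnet and it holds a live gossipsub subscription. A toy illustration of that conjunction, with a hypothetical `PeerView` standing in for `PeerInfo`:

/// Hypothetical, simplified view of the per-peer state the real code consults.
struct PeerView {
    connected: bool,
    metadata_advertises_subnet: bool,
    gossipsub_subscribed: bool,
}

/// Mirrors the conjunction used when counting long-lived subnet peers.
fn counts_for_subnet(peer: &PeerView) -> bool {
    peer.connected && peer.metadata_advertises_subnet && peer.gossipsub_subscribed
}

fn main() {
    // Advertising a subnet in METADATA without actually subscribing is not enough.
    let peer = PeerView {
        connected: true,
        metadata_advertises_subnet: true,
        gossipsub_subscribed: false,
    };
    assert!(!counts_for_subnet(&peer));
}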
diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index 438980b9e..691600dd4 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -1,16 +1,19 @@ use super::peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use super::peer_sync_status::PeerSyncStatus; use super::score::{Score, ScoreState}; -use crate::multiaddr::{Multiaddr, Protocol}; use crate::rpc::methods::MetaData; use crate::Enr; use crate::PeerId; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + types::Subnet, +}; use rand::seq::SliceRandom; use slog::{crit, debug, error, trace, warn}; use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::time::Instant; -use types::{EthSpec, SubnetId}; +use types::EthSpec; /// Max number of disconnected nodes to remember. const MAX_DC_PEERS: usize = 500; @@ -267,14 +270,14 @@ impl PeerDB { } /// Gives an iterator of all peers on a given subnet. - pub fn good_peers_on_subnet(&self, subnet_id: SubnetId) -> impl Iterator { + pub fn good_peers_on_subnet(&self, subnet: Subnet) -> impl Iterator { self.peers .iter() .filter(move |(_, info)| { // We check both the metadata and gossipsub data as we only want to count long-lived subscribed peers info.is_connected() - && info.on_subnet_metadata(subnet_id) - && info.on_subnet_gossipsub(subnet_id) + && info.on_subnet_metadata(&subnet) + && info.on_subnet_gossipsub(&subnet) && info.is_good_gossipsub_peer() }) .map(|(peer_id, _)| peer_id) @@ -382,11 +385,11 @@ impl PeerDB { /// Extends the ttl of all peers on the given subnet that have a shorter /// min_ttl than what's given. - pub fn extend_peers_on_subnet(&mut self, subnet_id: SubnetId, min_ttl: Instant) { + pub fn extend_peers_on_subnet(&mut self, subnet: &Subnet, min_ttl: Instant) { let log = &self.log; self.peers.iter_mut() .filter(move |(_, info)| { - info.is_connected() && info.on_subnet_metadata(subnet_id) && info.on_subnet_gossipsub(subnet_id) + info.is_connected() && info.on_subnet_metadata(subnet) && info.on_subnet_gossipsub(subnet) }) .for_each(|(peer_id,info)| { if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl { diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs index ff158067a..8b2df43ef 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs @@ -181,16 +181,18 @@ where mod tests { use super::super::ssz_snappy::*; use super::*; - use crate::rpc::methods::StatusMessage; use crate::rpc::protocol::*; - use snap::write::FrameEncoder; - use ssz::Encode; - use std::io::Write; - use types::{Epoch, Hash256, Slot}; + + use std::sync::Arc; + use types::{ForkContext, Hash256}; use unsigned_varint::codec::Uvi; type Spec = types::MainnetEthSpec; + fn fork_context() -> ForkContext { + ForkContext::new::(types::Slot::new(0), Hash256::zero(), &Spec::default_spec()) + } + #[test] fn test_decode_status_message() { let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap(); @@ -200,8 +202,9 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + let fork_context = Arc::new(fork_context()); let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); + SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, fork_context); // 
remove response code let mut snappy_buf = buf.clone(); @@ -233,8 +236,10 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + + let fork_context = Arc::new(fork_context()); let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); + SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, fork_context); let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); @@ -260,80 +265,34 @@ mod tests { // Response limits let limit = protocol_id.rpc_response_limits::(); let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let fork_context = Arc::new(fork_context()); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData); // Request limits let limit = protocol_id.rpc_request_limits(); let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id, 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new(protocol_id, 1_048_576, fork_context); assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData); } - - #[test] - fn test_decode_malicious_status_message() { - // 10 byte snappy stream identifier - let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; - - assert_eq!(stream_identifier.len(), 10); - - // byte 0(0xFE) is padding chunk type identifier for snappy messages - // byte 1,2,3 are chunk length (little endian) - let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; - - // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. - let status_message_bytes = StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), - finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), - head_slot: Slot::new(1), - } - .as_ssz_bytes(); - - assert_eq!(status_message_bytes.len(), 84); - assert_eq!(snap::raw::max_compress_len(status_message_bytes.len()), 130); - - let mut uvi_codec: Uvi = Uvi::default(); - let mut dst = BytesMut::with_capacity(1024); - - // Insert length-prefix - uvi_codec - .encode(status_message_bytes.len(), &mut dst) - .unwrap(); - - // Insert snappy stream identifier - dst.extend_from_slice(stream_identifier); - - // Insert malicious padding of 80 bytes. - for _ in 0..20 { - dst.extend_from_slice(malicious_padding); - } - - // Insert payload (42 bytes compressed) - let mut writer = FrameEncoder::new(Vec::new()); - writer.write_all(&status_message_bytes).unwrap(); - writer.flush().unwrap(); - assert_eq!(writer.get_ref().len(), 42); - dst.extend_from_slice(writer.get_ref()); - - // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
- - let snappy_protocol_id = - ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); - - let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); - - let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); - assert_eq!(snappy_decoded_message, RPCError::InvalidData); - } } diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs index b7deb959d..915572fd1 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs @@ -2,7 +2,8 @@ use crate::rpc::{ codec::base::OutboundCodec, protocol::{Encoding, Protocol, ProtocolId, RPCError, Version, ERROR_TYPE_MAX, ERROR_TYPE_MIN}, }; -use crate::rpc::{methods::*, InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; +use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; +use crate::{rpc::methods::*, EnrSyncCommitteeBitfield}; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; @@ -12,10 +13,16 @@ use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; use std::marker::PhantomData; +use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::{EthSpec, SignedBeaconBlock, SignedBeaconBlockBase}; +use types::{ + EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, +}; use unsigned_varint::codec::Uvi; +const CONTEXT_BYTES_LEN: usize = 4; + /* Inbound Codec */ pub struct SSZSnappyInboundCodec { @@ -24,11 +31,16 @@ pub struct SSZSnappyInboundCodec { len: Option, /// Maximum bytes that can be sent in one req/resp chunked responses. max_packet_size: usize, + fork_context: Arc, phantom: PhantomData, } impl SSZSnappyInboundCodec { - pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + pub fn new( + protocol: ProtocolId, + max_packet_size: usize, + fork_context: Arc, + ) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); @@ -38,6 +50,7 @@ impl SSZSnappyInboundCodec { protocol, len: None, phantom: PhantomData, + fork_context, max_packet_size, } } @@ -52,13 +65,38 @@ impl Encoder> for SSZSnappyInboundCodec< item: RPCCodedResponse, dst: &mut BytesMut, ) -> Result<(), Self::Error> { - let bytes = match item { - RPCCodedResponse::Success(resp) => match resp { + let bytes = match &item { + RPCCodedResponse::Success(resp) => match &resp { RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), - RPCResponse::MetaData(res) => res.as_ssz_bytes(), + RPCResponse::MetaData(res) => + // Encode the correct version of the MetaData response based on the negotiated version. 
+            {
+                match self.protocol.version {
+                    Version::V1 => MetaData::<TSpec>::V1(MetaDataV1 {
+                        seq_number: *res.seq_number(),
+                        attnets: res.attnets().clone(),
+                    })
+                    .as_ssz_bytes(),
+                    Version::V2 => {
+                        // `res` is of type MetaDataV2, return the ssz bytes
+                        if res.syncnets().is_ok() {
+                            res.as_ssz_bytes()
+                        } else {
+                            // `res` is of type MetaDataV1, create a MetaDataV2 by adding a default syncnets field
+                            // Note: This code path is redundant as `res` would always be of type MetaDataV2
+                            MetaData::<TSpec>::V2(MetaDataV2 {
+                                seq_number: *res.seq_number(),
+                                attnets: res.attnets().clone(),
+                                syncnets: EnrSyncCommitteeBitfield::<TSpec>::default(),
+                            })
+                            .as_ssz_bytes()
+                        }
+                    }
+                }
+            }
        },
        RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(),
        RPCCodedResponse::StreamTermination(_) => {
@@ -71,6 +109,12 @@
                "attempting to encode data > max_packet_size",
            ));
        }
+
+        // Add context bytes if required
+        if let Some(ref context_bytes) = context_bytes(&self.protocol, &self.fork_context, &item) {
+            dst.extend_from_slice(context_bytes);
+        }
+
        // Inserts the length prefix of the uncompressed bytes into dst
        // encoded as an unsigned varint
        self.inner
@@ -93,18 +137,9 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
    type Error = RPCError;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
-        let length = if let Some(length) = self.len {
-            length
-        } else {
-            // Decode the length of the uncompressed bytes from an unsigned varint
-            // Note: length-prefix of > 10 bytes(uint64) would be a decoding error
-            match self.inner.decode(src).map_err(RPCError::from)? {
-                Some(length) => {
-                    self.len = Some(length);
-                    length
-                }
-                None => return Ok(None), // need more bytes to decode length
-            }
+        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
+            Some(len) => len,
+            None => return Ok(None),
        };

        // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -128,46 +163,9 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
                self.len = None;
                let _read_bytes = src.split_to(n as usize);

-                // We need not check that decoded_buffer.len() is within bounds here
-                // since we have already checked `length` above.
- match self.protocol.message_name { - Protocol::Status => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Status( - StatusMessage::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::Goodbye => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Goodbye( - GoodbyeReason::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::BlocksByRange => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::BlocksByRoot => match self.protocol.version { - Version::V1 => { - Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from_ssz_bytes(&decoded_buffer)?, - }))) - } - }, - Protocol::Ping => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Ping(Ping { - data: u64::from_ssz_bytes(&decoded_buffer)?, - }))), - }, - // This case should be unreachable as `MetaData` requests are handled separately in the `InboundUpgrade` - Protocol::MetaData => match self.protocol.version { - Version::V1 => { - if !decoded_buffer.is_empty() { - Err(RPCError::InvalidData) - } else { - Ok(Some(InboundRequest::MetaData(PhantomData))) - } - } - }, + match self.protocol.version { + Version::V1 => handle_v1_request(self.protocol.message_name, &decoded_buffer), + Version::V2 => handle_v2_request(self.protocol.message_name, &decoded_buffer), } } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), @@ -182,11 +180,18 @@ pub struct SSZSnappyOutboundCodec { protocol: ProtocolId, /// Maximum bytes that can be sent in one req/resp chunked responses. max_packet_size: usize, + /// The fork name corresponding to the received context bytes. + fork_name: Option, + fork_context: Arc, phantom: PhantomData, } impl SSZSnappyOutboundCodec { - pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + pub fn new( + protocol: ProtocolId, + max_packet_size: usize, + fork_context: Arc, + ) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); @@ -196,6 +201,8 @@ impl SSZSnappyOutboundCodec { protocol, max_packet_size, len: None, + fork_name: None, + fork_context, phantom: PhantomData, } } @@ -251,18 +258,23 @@ impl Decoder for SSZSnappyOutboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let length = if let Some(length) = self.len { - length - } else { - // Decode the length of the uncompressed bytes from an unsigned varint - // Note: length-prefix of > 10 bytes(uint64) would be a decoding error - match self.inner.decode(src).map_err(RPCError::from)? { - Some(length) => { - self.len = Some(length as usize); - length - } - None => return Ok(None), // need more bytes to decode length + // Read the context bytes if required + if self.protocol.has_context_bytes() && self.fork_name.is_none() { + if src.len() >= CONTEXT_BYTES_LEN { + let context_bytes = src.split_to(CONTEXT_BYTES_LEN); + let mut result = [0; CONTEXT_BYTES_LEN]; + result.copy_from_slice(context_bytes.as_ref()); + self.fork_name = Some(context_bytes_to_fork_name( + result, + self.fork_context.clone(), + )?); + } else { + return Ok(None); } + } + let length = match handle_length(&mut self.inner, &mut self.len, src)? 
{ + Some(len) => len, + None => return Ok(None), }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -286,42 +298,13 @@ impl Decoder for SSZSnappyOutboundCodec { self.len = None; let _read_bytes = src.split_to(n as usize); - // We need not check that decoded_buffer.len() is within bounds here - // since we have already checked `length` above. - match self.protocol.message_name { - Protocol::Status => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::Status( - StatusMessage::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - // This case should be unreachable as `Goodbye` has no response. - Protocol::Goodbye => Err(RPCError::InvalidData), - Protocol::BlocksByRange => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( - // FIXME(altair): support Altair blocks - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( - &decoded_buffer, - )?), - )))), - }, - Protocol::BlocksByRoot => match self.protocol.version { - // FIXME(altair): support Altair blocks - Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( - &decoded_buffer, - )?), - )))), - }, - Protocol::Ping => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::Pong(Ping { - data: u64::from_ssz_bytes(&decoded_buffer)?, - }))), - }, - Protocol::MetaData => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( - &decoded_buffer, - )?))), - }, + match self.protocol.version { + Version::V1 => handle_v1_response(self.protocol.message_name, &decoded_buffer), + Version::V2 => handle_v2_response( + self.protocol.message_name, + &decoded_buffer, + &mut self.fork_name, + ), } } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), @@ -336,17 +319,9 @@ impl OutboundCodec> for SSZSnappyOutbound &mut self, src: &mut BytesMut, ) -> Result, RPCError> { - let length = if let Some(length) = self.len { - length - } else { - // Decode the length of the uncompressed bytes from an unsigned varint - match self.inner.decode(src).map_err(RPCError::from)? { - Some(length) => { - self.len = Some(length as usize); - length - } - None => return Ok(None), // need more bytes to decode length - } + let length = match handle_length(&mut self.inner, &mut self.len, src)? { + Some(len) => len, + None => return Ok(None), }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -398,3 +373,739 @@ fn handle_error( _ => Err(err).map_err(RPCError::from), } } + +/// Returns `Some(context_bytes)` for encoding RPC responses that require context bytes. +/// Returns `None` when context bytes are not required. +fn context_bytes( + protocol: &ProtocolId, + fork_context: &ForkContext, + resp: &RPCCodedResponse, +) -> Option<[u8; CONTEXT_BYTES_LEN]> { + // Add the context bytes if required + if protocol.has_context_bytes() { + if let RPCCodedResponse::Success(RPCResponse::BlocksByRange(res)) = resp { + if let SignedBeaconBlock::Altair { .. } = **res { + // Altair context being `None` implies that "altair never happened". + // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. + return fork_context.to_context_bytes(ForkName::Altair); + } else if let SignedBeaconBlock::Base { .. 
} = **res {
+                return Some(fork_context.genesis_context_bytes());
+            }
+        }
+
+        if let RPCCodedResponse::Success(RPCResponse::BlocksByRoot(res)) = resp {
+            if let SignedBeaconBlock::Altair { .. } = **res {
+                // Altair context being `None` implies that "altair never happened".
+                // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case.
+                return fork_context.to_context_bytes(ForkName::Altair);
+            } else if let SignedBeaconBlock::Base { .. } = **res {
+                return Some(fork_context.genesis_context_bytes());
+            }
+        }
+    }
+    None
+}
+
+/// Decodes the length-prefix from the bytes as an unsigned protobuf varint.
+///
+/// Returns `Ok(Some(length))` by decoding the bytes if required.
+/// Returns `Ok(None)` if more bytes are needed to decode the length-prefix.
+/// Returns an `RPCError` for a decoding error.
+fn handle_length(
+    uvi_codec: &mut Uvi<usize>,
+    len: &mut Option<usize>,
+    bytes: &mut BytesMut,
+) -> Result<Option<usize>, RPCError> {
+    if let Some(length) = len {
+        Ok(Some(*length))
+    } else {
+        // Decode the length of the uncompressed bytes from an unsigned varint
+        // Note: length-prefix of > 10 bytes(uint64) would be a decoding error
+        match uvi_codec.decode(bytes).map_err(RPCError::from)? {
+            Some(length) => {
+                *len = Some(length as usize);
+                Ok(Some(length))
+            }
+            None => Ok(None), // need more bytes to decode length
+        }
+    }
+}
+
+/// Decodes a `Version::V1` `InboundRequest` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream whose length equals the
+/// length-prefix received at the beginning of the stream.
+fn handle_v1_request<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+) -> Result<Option<InboundRequest<T>>, RPCError> {
+    match protocol {
+        Protocol::Status => Ok(Some(InboundRequest::Status(StatusMessage::from_ssz_bytes(
+            decoded_buffer,
+        )?))),
+        Protocol::Goodbye => Ok(Some(InboundRequest::Goodbye(
+            GoodbyeReason::from_ssz_bytes(decoded_buffer)?,
+        ))),
+        Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange(
+            BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?,
+        ))),
+        Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
+            block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
+        }))),
+        Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
+            data: u64::from_ssz_bytes(decoded_buffer)?,
+        }))),
+
+        // MetaData requests return early from InboundUpgrade and do not reach the decoder.
+        // Handle this case just for completeness.
+        Protocol::MetaData => {
+            if !decoded_buffer.is_empty() {
+                Err(RPCError::InvalidData)
+            } else {
+                Ok(Some(InboundRequest::MetaData(PhantomData)))
+            }
+        }
+    }
+}
+
+/// Decodes a `Version::V2` `InboundRequest` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream whose length equals the
+/// length-prefix received at the beginning of the stream.
+fn handle_v2_request<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+) -> Result<Option<InboundRequest<T>>, RPCError> {
+    match protocol {
+        Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange(
+            BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?,
+        ))),
+        Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
+            block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
+        }))),
+        // MetaData requests return early from InboundUpgrade and do not reach the decoder.
+        // Handle this case just for completeness.
+        Protocol::MetaData => {
+            if !decoded_buffer.is_empty() {
+                Err(RPCError::InvalidData)
+            } else {
+                Ok(Some(InboundRequest::MetaData(PhantomData)))
+            }
+        }
+        _ => Err(RPCError::ErrorResponse(
+            RPCResponseErrorCode::InvalidRequest,
+            format!("{} does not support version 2", protocol),
+        )),
+    }
+}
+
+/// Decodes a `Version::V1` `RPCResponse` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream whose length equals the
+/// length-prefix received at the beginning of the stream.
+fn handle_v1_response<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+) -> Result<Option<RPCResponse<T>>, RPCError> {
+    match protocol {
+        Protocol::Status => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
+            decoded_buffer,
+        )?))),
+        // This case should be unreachable as `Goodbye` has no response.
+        Protocol::Goodbye => Err(RPCError::InvalidData),
+        Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+            SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+        )))),
+        Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+            SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+        )))),
+        Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
+            data: u64::from_ssz_bytes(decoded_buffer)?,
+        }))),
+        Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1(
+            MetaDataV1::from_ssz_bytes(decoded_buffer)?,
+        )))),
+    }
+}
+
+/// Decodes a `Version::V2` `RPCResponse` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream whose length equals the
+/// length-prefix received at the beginning of the stream.
+///
+/// For BlocksByRange/BlocksByRoot responses, decodes the appropriate response
+/// according to the received `ForkName`.
+fn handle_v2_response<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+    fork_name: &mut Option<ForkName>,
+) -> Result<Option<RPCResponse<T>>, RPCError> {
+    // MetaData does not contain context_bytes
+    if let Protocol::MetaData = protocol {
+        Ok(Some(RPCResponse::MetaData(MetaData::V2(
+            MetaDataV2::from_ssz_bytes(decoded_buffer)?,
+        ))))
+    } else {
+        let fork_name = fork_name.take().ok_or_else(|| {
+            RPCError::ErrorResponse(
+                RPCResponseErrorCode::InvalidRequest,
+                format!("No context bytes provided for {} response", protocol),
+            )
+        })?;
+        match protocol {
+            Protocol::BlocksByRange => match fork_name {
+                ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+                    SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
+                        decoded_buffer,
+                    )?),
+                )))),
+
+                ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+                    SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+                )))),
+            },
+            Protocol::BlocksByRoot => match fork_name {
+                ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+                    SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
+                        decoded_buffer,
+                    )?),
+                )))),
+                ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+                    SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+                )))),
+            },
+            _ => Err(RPCError::ErrorResponse(
+                RPCResponseErrorCode::InvalidRequest,
+                "Invalid v2 request".to_string(),
+            )),
+        }
+    }
+}
+
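To make the decoding above concrete: once the base codec has stripped the leading response-code byte, a single v2 response chunk arrives as four context bytes, then an unsigned-varint length prefix of the uncompressed payload, then the snappy frames. A byte-level sketch of that framing (illustrative helper, not part of the codec; the digest value is made up):

/// Assemble the byte layout a v2 response chunk follows after the response-code
/// byte has been consumed: 4 context bytes, a protobuf-style unsigned varint of
/// the *uncompressed* payload length, then snappy frames.
fn v2_chunk(context_bytes: [u8; 4], uncompressed_len: usize, snappy_frames: &[u8]) -> Vec<u8> {
    let mut chunk = Vec::with_capacity(4 + 10 + snappy_frames.len());
    chunk.extend_from_slice(&context_bytes);
    // Unsigned varint: 7 bits per byte, high bit set on all but the last byte.
    let mut n = uncompressed_len;
    loop {
        if n < 0x80 {
            chunk.push(n as u8);
            break;
        }
        chunk.push((n as u8 & 0x7f) | 0x80);
        n >>= 7;
    }
    chunk.extend_from_slice(snappy_frames);
    chunk
}

fn main() {
    // A 300-byte payload needs a two-byte varint: 0xAC 0x02.
    let chunk = v2_chunk([0xAF, 0xD0, 0x85, 0xE8], 300, &[]);
    assert_eq!(&chunk[4..], &[0xAC, 0x02][..]);
}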
+/// Takes the context bytes and a fork_context and returns the corresponding fork_name.
+fn context_bytes_to_fork_name(
+    context_bytes: [u8; CONTEXT_BYTES_LEN],
+    fork_context: Arc<ForkContext>,
+) -> Result<ForkName, RPCError> {
+    fork_context
+        .from_context_bytes(context_bytes)
+        .cloned()
+        .ok_or_else(|| {
+            RPCError::ErrorResponse(
+                RPCResponseErrorCode::InvalidRequest,
+                "Context bytes do not correspond to a valid fork".to_string(),
+            )
+        })
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use crate::rpc::{protocol::*, MetaData};
+    use crate::{
+        rpc::{methods::StatusMessage, Ping, RPCResponseErrorCode},
+        types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield},
+    };
+    use std::sync::Arc;
+    use types::{
+        BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, ForkContext, Hash256, Signature,
+        SignedBeaconBlock, Slot,
+    };
+
+    use snap::write::FrameEncoder;
+    use ssz::Encode;
+    use std::io::Write;
+
+    type Spec = types::MainnetEthSpec;
+
+    fn fork_context() -> ForkContext {
+        ForkContext::new::<Spec>(types::Slot::new(0), Hash256::zero(), &Spec::default_spec())
+    }
+
+    fn base_block() -> SignedBeaconBlock<Spec> {
+        let full_block = BeaconBlock::Base(BeaconBlockBase::<Spec>::full(&Spec::default_spec()));
+        SignedBeaconBlock::from_block(full_block, Signature::empty())
+    }
+
+    fn altair_block() -> SignedBeaconBlock<Spec> {
+        let full_block =
+            BeaconBlock::Altair(BeaconBlockAltair::<Spec>::full(&Spec::default_spec()));
+        SignedBeaconBlock::from_block(full_block, Signature::empty())
+    }
+
+    fn status_message() -> StatusMessage {
+        StatusMessage {
+            fork_digest: [0; 4],
+            finalized_root: Hash256::from_low_u64_be(0),
+            finalized_epoch: Epoch::new(1),
+            head_root: Hash256::from_low_u64_be(0),
+            head_slot: Slot::new(1),
+        }
+    }
+
+    fn ping_message() -> Ping {
+        Ping { data: 1 }
+    }
+
+    fn metadata() -> MetaData<Spec> {
+        MetaData::V1(MetaDataV1 {
+            seq_number: 1,
+            attnets: EnrAttestationBitfield::<Spec>::default(),
+        })
+    }
+
+    fn metadata_v2() -> MetaData<Spec> {
+        MetaData::V2(MetaDataV2 {
+            seq_number: 1,
+            attnets: EnrAttestationBitfield::<Spec>::default(),
+            syncnets: EnrSyncCommitteeBitfield::<Spec>::default(),
+        })
+    }
+
+    /// Encodes the given protocol response as bytes.
+    fn encode(
+        protocol: Protocol,
+        version: Version,
+        message: RPCCodedResponse<Spec>,
+    ) -> Result<BytesMut, RPCError> {
+        let max_packet_size = 1_048_576;
+        let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy);
+        let fork_context = Arc::new(fork_context());
+
+        let mut buf = BytesMut::new();
+        let mut snappy_inbound_codec =
+            SSZSnappyInboundCodec::<Spec>::new(snappy_protocol_id, max_packet_size, fork_context);
+
+        snappy_inbound_codec.encode(message, &mut buf)?;
+        Ok(buf)
+    }
+
+    /// Attempts to decode the given protocol bytes as an rpc response.
+    fn decode(
+        protocol: Protocol,
+        version: Version,
+        message: &mut BytesMut,
+    ) -> Result<Option<RPCResponse<Spec>>, RPCError> {
+        let max_packet_size = 1_048_576;
+        let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy);
+        let fork_context = Arc::new(fork_context());
+        let mut snappy_outbound_codec =
+            SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, max_packet_size, fork_context);
+        // decode message just as snappy message
+        snappy_outbound_codec.decode(message)
+    }
+
+    /// Encodes the provided protocol message as bytes and tries to decode the encoded bytes.
+    fn encode_then_decode(
+        protocol: Protocol,
+        version: Version,
+        message: RPCCodedResponse<Spec>,
+    ) -> Result<Option<RPCResponse<Spec>>, RPCError> {
+        let mut encoded = encode(protocol, version.clone(), message)?;
+        decode(protocol, version, &mut encoded)
+    }
+
+    // Test RPCResponse encoding/decoding for V1 messages
+    #[test]
+    fn test_encode_then_decode_v1() {
+        assert_eq!(
+            encode_then_decode(
+                Protocol::Status,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::Status(status_message()))
+            ),
+            Ok(Some(RPCResponse::Status(status_message())))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::Ping,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::Pong(ping_message()))
+            ),
+            Ok(Some(RPCResponse::Pong(ping_message())))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block()))))
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::BlocksByRange,
+                    Version::V1,
+                    RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))),
+                )
+                .unwrap_err(),
+                RPCError::SSZDecodeError(_)
+            ),
+            "altair block cannot be decoded with blocks by range V1 version"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRoot,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block()))))
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::BlocksByRoot,
+                    Version::V1,
+                    RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))),
+                )
+                .unwrap_err(),
+                RPCError::SSZDecodeError(_)
+            ),
+            "altair block cannot be decoded with blocks by root V1 version"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata()))),
+        );
+
+        // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata()))),
+        );
+    }
+
+    // Test RPCResponse encoding/decoding for V2 messages
+    #[test]
+    fn test_encode_then_decode_v2() {
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::Status,
+                    Version::V2,
+                    RPCCodedResponse::Success(RPCResponse::Status(status_message())),
+                )
+                .unwrap_err(),
+                RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+            ),
+            "status does not have V2 message"
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::Ping,
+                    Version::V2,
+                    RPCCodedResponse::Success(RPCResponse::Pong(ping_message())),
+                )
+                .unwrap_err(),
+                RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+            ),
+            "ping does not have V2 message"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block()))))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block()))))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRoot,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block()))))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRoot,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block()))))
+        );
+
+        // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata()))
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata_v2())))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2()))
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata_v2())))
+        );
+    }
+
+    // Test context bytes handling for V2 messages
+    #[test]
+    fn test_context_bytes_v2() {
+        let fork_context = fork_context();
+
+        // Removing context bytes for v2 messages should error
+        let mut encoded_bytes = encode(
+            Protocol::BlocksByRange,
+            Version::V2,
+            RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))),
+        )
+        .unwrap();
+
+        let _ = encoded_bytes.split_to(4);
+
+        assert!(matches!(
+            decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(),
+            RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+        ));
+
+        let mut encoded_bytes = encode(
+            Protocol::BlocksByRoot,
+            Version::V2,
+            RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))),
+        )
+        .unwrap();
+
+        let _ = encoded_bytes.split_to(4);
+
+        assert!(matches!(
+            decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(),
+            RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+        ));
+
+        // Trying to decode a base block with altair context bytes should give ssz decoding error
+        let mut encoded_bytes = encode(
+            Protocol::BlocksByRange,
+            Version::V2,
+            RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))),
+        )
+        .unwrap();
+
+        let mut wrong_fork_bytes = BytesMut::new();
+        wrong_fork_bytes
+            .extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
+        wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4));
+
+        assert!(matches!(
+            decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(),
+            RPCError::SSZDecodeError(_),
+        ));
+
+        // Trying to decode an altair block with base context bytes should give ssz decoding error
+        let mut encoded_bytes = encode(
+            Protocol::BlocksByRoot,
+            Version::V2,
+            RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))),
+        )
+        .unwrap();
+
+        let mut wrong_fork_bytes = BytesMut::new();
+        wrong_fork_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Base).unwrap());
+        wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4));
+
+        assert!(matches!(
+            decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(),
+            RPCError::SSZDecodeError(_),
+        ));
+
+        // Adding context bytes to Protocols that don't require it should return an error
+        let mut encoded_bytes = BytesMut::new();
+        encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
+        encoded_bytes.extend_from_slice(
+            &encode(
+                Protocol::MetaData,
+                Version::V2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ) + .unwrap(), + ); + + assert!(decode(Protocol::MetaData, Version::V2, &mut encoded_bytes).is_err()); + + // Sending context bytes which do not correspond to any fork should return an error + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + ) + .unwrap(); + + let mut wrong_fork_bytes = BytesMut::new(); + wrong_fork_bytes.extend_from_slice(&[42, 42, 42, 42]); + wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + )); + + // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + ) + .unwrap(); + + let mut part = encoded_bytes.split_to(3); + + assert_eq!( + decode(Protocol::BlocksByRange, Version::V2, &mut part), + Ok(None) + ) + } + + /// Test a malicious snappy encoding for a V1 `Status` message where the attacker + /// sends a valid message filled with a stream of useless padding before the actual message. + #[test] + fn test_decode_malicious_v1_message() { + // 10 byte snappy stream identifier + let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; + + assert_eq!(stream_identifier.len(), 10); + + // byte 0(0xFE) is padding chunk type identifier for snappy messages + // byte 1,2,3 are chunk length (little endian) + let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; + + // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. + let status_message_bytes = StatusMessage { + fork_digest: [0; 4], + finalized_root: Hash256::from_low_u64_be(0), + finalized_epoch: Epoch::new(1), + head_root: Hash256::from_low_u64_be(0), + head_slot: Slot::new(1), + } + .as_ssz_bytes(); + + assert_eq!(status_message_bytes.len(), 84); + assert_eq!(snap::raw::max_compress_len(status_message_bytes.len()), 130); + + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + + // Insert length-prefix + uvi_codec + .encode(status_message_bytes.len(), &mut dst) + .unwrap(); + + // Insert snappy stream identifier + dst.extend_from_slice(stream_identifier); + + // Insert malicious padding of 80 bytes. + for _ in 0..20 { + dst.extend_from_slice(malicious_padding); + } + + // Insert payload (42 bytes compressed) + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&status_message_bytes).unwrap(); + writer.flush().unwrap(); + assert_eq!(writer.get_ref().len(), 42); + dst.extend_from_slice(writer.get_ref()); + + // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. + assert_eq!( + decode(Protocol::Status, Version::V1, &mut dst).unwrap_err(), + RPCError::InvalidData + ); + } + + /// Test a malicious snappy encoding for a V2 `BlocksByRange` message where the attacker + /// sends a valid message filled with a stream of useless padding before the actual message. 
+ #[test] + fn test_decode_malicious_v2_message() { + let fork_context = Arc::new(fork_context()); + + // 10 byte snappy stream identifier + let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; + + assert_eq!(stream_identifier.len(), 10); + + // byte 0(0xFE) is padding chunk type identifier for snappy messages + // byte 1,2,3 are chunk length (little endian) + let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; + + // Full altair block is 157916 bytes uncompressed. `max_compressed_len` is 32 + 157916 + 157916/6 = 184267. + let block_message_bytes = altair_block().as_ssz_bytes(); + + assert_eq!(block_message_bytes.len(), 157916); + assert_eq!( + snap::raw::max_compress_len(block_message_bytes.len()), + 184267 + ); + + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + + // Insert context bytes + dst.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + + // Insert length-prefix + uvi_codec + .encode(block_message_bytes.len(), &mut dst) + .unwrap(); + + // Insert snappy stream identifier + dst.extend_from_slice(stream_identifier); + + // Insert malicious padding of 176156 bytes. + for _ in 0..44039 { + dst.extend_from_slice(malicious_padding); + } + + // Insert payload (8103 bytes compressed) + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&block_message_bytes).unwrap(); + writer.flush().unwrap(); + assert_eq!(writer.get_ref().len(), 8103); + dst.extend_from_slice(writer.get_ref()); + + // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. + assert_eq!( + decode(Protocol::BlocksByRange, Version::V2, &mut dst).unwrap_err(), + RPCError::InvalidData + ); + } +} diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 554e6787f..506093ee6 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -4,6 +4,7 @@ use super::methods::{ GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination, }; +use super::outbound::OutboundRequestContainer; use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; @@ -23,12 +24,13 @@ use smallvec::SmallVec; use std::{ collections::hash_map::Entry, pin::Pin, + sync::Arc, task::{Context, Poll}, time::Duration, }; use tokio::time::{sleep_until, Instant as TInstant, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; -use types::EthSpec; +use types::{EthSpec, ForkContext}; /// The time (in seconds) before a substream that is awaiting a response from the user times out. pub const RESPONSE_TIMEOUT: u64 = 10; @@ -126,6 +128,9 @@ where /// This keeps track of the number of attempts. outbound_io_error_retries: u8, + /// Fork specific info. 
+ fork_context: Arc, + /// Logger for handling RPC streams log: slog::Logger, } @@ -203,6 +208,7 @@ where { pub fn new( listen_protocol: SubstreamProtocol, ()>, + fork_context: Arc, log: &slog::Logger, ) -> Self { RPCHandler { @@ -219,6 +225,7 @@ where state: HandlerState::Active, max_dial_negotiated: 8, outbound_io_error_retries: 0, + fork_context, log: log.clone(), } } @@ -308,7 +315,7 @@ where type OutEvent = HandlerEvent; type Error = RPCError; type InboundProtocol = RPCProtocol; - type OutboundProtocol = OutboundRequest; + type OutboundProtocol = OutboundRequestContainer; type OutboundOpenInfo = (RequestId, OutboundRequest); // Keep track of the id and the request type InboundOpenInfo = (); @@ -874,7 +881,14 @@ where let (id, req) = self.dial_queue.remove(0); self.dial_queue.shrink_to_fit(); return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(req.clone(), ()).map_info(|()| (id, req)), + protocol: SubstreamProtocol::new( + OutboundRequestContainer { + req: req.clone(), + fork_context: self.fork_context.clone(), + }, + (), + ) + .map_info(|()| (id, req)), }); } diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index e24b6e980..b2be19647 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -1,6 +1,6 @@ //! Available RPC methods types and ids. -use crate::types::EnrBitfield; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use regex::bytes::Regex; use serde::Serialize; use ssz_derive::{Decode, Encode}; @@ -10,6 +10,7 @@ use ssz_types::{ }; use std::ops::Deref; use strum::AsStaticStr; +use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Maximum number of blocks in a single request. @@ -93,13 +94,23 @@ pub struct Ping { } /// The METADATA response structure. -#[derive(Encode, Decode, Clone, Debug, PartialEq, Serialize)] +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Encode, Decode, Clone, Debug, PartialEq, Serialize), + serde(bound = "T: EthSpec", deny_unknown_fields), + ) +)] +#[derive(Clone, Debug, PartialEq, Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct MetaData { /// A sequential counter indicating when data gets modified. pub seq_number: u64, - /// The persistent subnet bitfield. - pub attnets: EnrBitfield, + /// The persistent attestation subnet bitfield. + pub attnets: EnrAttestationBitfield, + /// The persistent sync committee bitfield. + #[superstruct(only(V2))] + pub syncnets: EnrSyncCommitteeBitfield, } /// The reason given for a `Goodbye` message. 
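For readers unfamiliar with `superstruct`: the macro above generates one struct per variant (`MetaDataV1`, `MetaDataV2`) plus a `MetaData` enum with accessors, where fields shared by every variant get infallible getters and V2-only fields come back as a `Result`. That is why call sites switch to `seq_number()` and `syncnets().is_ok()`. A hand-written, heavily simplified approximation of the expansion:

/// Simplified sketch of the generated enum; the real variants are the
/// `MetaDataV1`/`MetaDataV2` structs and the fields are SSZ bitfields.
enum MetaDataSketch {
    V1 { seq_number: u64, attnets: Vec<bool> },
    V2 { seq_number: u64, attnets: Vec<bool>, syncnets: Vec<bool> },
}

impl MetaDataSketch {
    /// Present in every variant, so the getter is infallible.
    fn seq_number(&self) -> &u64 {
        match self {
            Self::V1 { seq_number, .. } | Self::V2 { seq_number, .. } => seq_number,
        }
    }

    /// Only present in V2, so the getter is fallible.
    fn syncnets(&self) -> Result<&Vec<bool>, ()> {
        match self {
            Self::V1 { .. } => Err(()),
            Self::V2 { syncnets, .. } => Ok(syncnets),
        }
    }
}

fn main() {
    let v1 = MetaDataSketch::V1 { seq_number: 1, attnets: vec![false; 64] };
    assert_eq!(*v1.seq_number(), 1);
    assert!(v1.syncnets().is_err());
}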
@@ -360,7 +371,7 @@ impl std::fmt::Display for RPCResponse { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), - RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number), + RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), } } } diff --git a/beacon_node/eth2_libp2p/src/rpc/mod.rs b/beacon_node/eth2_libp2p/src/rpc/mod.rs index 702e3e20d..96fa23506 100644 --- a/beacon_node/eth2_libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2_libp2p/src/rpc/mod.rs @@ -15,12 +15,13 @@ use libp2p::{Multiaddr, PeerId}; use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; -use types::EthSpec; +use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; -pub(crate) use methods::{MetaData, Ping, RPCCodedResponse, RPCResponse}; +pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::{InboundRequest, RPCProtocol}; pub use handler::SubstreamId; @@ -101,12 +102,13 @@ pub struct RPC { limiter: RateLimiter, /// Queue of events to be processed. events: Vec, RPCMessage>>, + fork_context: Arc, /// Slog logger for RPC behaviour. log: slog::Logger, } impl RPC { - pub fn new(log: slog::Logger) -> Self { + pub fn new(fork_context: Arc, log: slog::Logger) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); let limiter = RPCRateLimiterBuilder::new() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) @@ -124,6 +126,7 @@ impl RPC { RPC { limiter, events: Vec::new(), + fork_context, log, } } @@ -182,10 +185,12 @@ where RPCHandler::new( SubstreamProtocol::new( RPCProtocol { + fork_context: self.fork_context.clone(), phantom: PhantomData, }, (), ), + self.fork_context.clone(), &self.log, ) } diff --git a/beacon_node/eth2_libp2p/src/rpc/outbound.rs b/beacon_node/eth2_libp2p/src/rpc/outbound.rs index b9dbd08b5..891125534 100644 --- a/beacon_node/eth2_libp2p/src/rpc/outbound.rs +++ b/beacon_node/eth2_libp2p/src/rpc/outbound.rs @@ -14,16 +14,23 @@ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, SinkExt}; use libp2p::core::{OutboundUpgrade, UpgradeInfo}; +use std::sync::Arc; use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::EthSpec; +use types::{EthSpec, ForkContext}; /* Outbound request */ // Combines all the RPC requests into a single enum to implement `UpgradeInfo` and // `OutboundUpgrade` +#[derive(Debug, Clone)] +pub struct OutboundRequestContainer { + pub req: OutboundRequest, + pub fork_context: Arc, +} + #[derive(Debug, Clone, PartialEq)] pub enum OutboundRequest { Status(StatusMessage), @@ -34,13 +41,13 @@ pub enum OutboundRequest { MetaData(PhantomData), } -impl UpgradeInfo for OutboundRequest { +impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; // add further protocols as we support more encodings/versions fn protocol_info(&self) -> Self::InfoIter { - self.supported_protocols() + self.req.supported_protocols() } } @@ -59,26 +66,23 @@ impl OutboundRequest { Version::V1, Encoding::SSZSnappy, )], - OutboundRequest::BlocksByRange(_) => vec![ProtocolId::new( - Protocol::BlocksByRange, - Version::V1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlocksByRoot(_) => vec![ProtocolId::new( - Protocol::BlocksByRoot, 
- Version::V1, - Encoding::SSZSnappy, - )], + OutboundRequest::BlocksByRange(_) => vec![ + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ], + OutboundRequest::BlocksByRoot(_) => vec![ + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], OutboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, Encoding::SSZSnappy, )], - OutboundRequest::MetaData(_) => vec![ProtocolId::new( - Protocol::MetaData, - Version::V1, - Encoding::SSZSnappy, - )], + OutboundRequest::MetaData(_) => vec![ + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ], } } @@ -130,7 +134,7 @@ impl OutboundRequest { pub type OutboundFramed = Framed, OutboundCodec>; -impl OutboundUpgrade for OutboundRequest +impl OutboundUpgrade for OutboundRequestContainer where TSpec: EthSpec + Send + 'static, TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, @@ -147,6 +151,7 @@ where let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new( protocol, usize::max_value(), + self.fork_context.clone(), )); OutboundCodec::SSZSnappy(ssz_snappy_codec) } @@ -155,7 +160,7 @@ where let mut socket = Framed::new(socket, codec); async { - socket.send(self).await?; + socket.send(self.req).await?; socket.close().await?; Ok(socket) } diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index 031246ba1..b85e48d31 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -12,6 +12,7 @@ use ssz::Encode; use ssz_types::VariableList; use std::io; use std::marker::PhantomData; +use std::sync::Arc; use std::time::Duration; use strum::{AsStaticRef, AsStaticStr}; use tokio_io_timeout::TimeoutStream; @@ -19,19 +20,35 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock}; +use types::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, EthSpec, ForkContext, Hash256, MainnetEthSpec, + Signature, SignedBeaconBlock, +}; lazy_static! { // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is // same across different `EthSpec` implementations. 
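A note on why the ordering of `supported_protocols` matters: libp2p's multistream-select walks the dialer's protocol list in order and settles on the first entry the listener also speaks, so listing the V2 id first is what makes V2 preferred while still allowing a V1 fallback. A standalone sketch of that selection rule (illustrative names, not Lighthouse code):

/// Pick the first of `ours` (highest preference first) that `theirs` also supports.
fn negotiate<'a>(ours: &'a [&'a str], theirs: &[&str]) -> Option<&'a str> {
    ours.iter().copied().find(|p| theirs.contains(p))
}

fn main() {
    let ours = [
        "/eth2/beacon_chain/req/beacon_blocks_by_range/2/ssz_snappy",
        "/eth2/beacon_chain/req/beacon_blocks_by_range/1/ssz_snappy",
    ];
    // A pre-Altair peer only advertises v1, so negotiation falls back to it.
    let theirs = ["/eth2/beacon_chain/req/beacon_blocks_by_range/1/ssz_snappy"];
    assert_eq!(negotiate(&ours, &theirs), Some(theirs[0]));
}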
- pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( - BeaconBlock::empty(&MainnetEthSpec::default_spec()), + pub static ref SIGNED_BEACON_BLOCK_BASE_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Base(BeaconBlockBase::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() .len(); - pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( - BeaconBlock::full(&MainnetEthSpec::default_spec()), + pub static ref SIGNED_BEACON_BLOCK_BASE_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Base(BeaconBlockBase::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + + pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Altair(BeaconBlockAltair::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Altair(BeaconBlockAltair::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() .len(); @@ -95,6 +112,8 @@ pub enum Protocol { pub enum Version { /// Version 1 of RPC V1, + /// Version 2 of RPC + V2, } /// RPC Encodings supported. @@ -130,6 +149,7 @@ impl std::fmt::Display for Version { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { Version::V1 => "1", + Version::V2 => "2", }; f.write_str(repr) } @@ -137,6 +157,7 @@ impl std::fmt::Display for Version { #[derive(Debug, Clone)] pub struct RPCProtocol<TSpec: EthSpec> { + pub fork_context: Arc<ForkContext>, pub phantom: PhantomData<TSpec>, } @@ -149,9 +170,13 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> { vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + // V2 variants have higher preference than V1 + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ] } @@ -226,22 +251,49 @@ impl ProtocolId { <StatusMessage as Encode>::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response - Protocol::BlocksByRange => { - RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) - } - Protocol::BlocksByRoot => { - RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) - } + Protocol::BlocksByRange => RpcLimits::new( + std::cmp::min( + *SIGNED_BEACON_BLOCK_ALTAIR_MIN, + *SIGNED_BEACON_BLOCK_BASE_MIN, + ), + std::cmp::max( + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, + *SIGNED_BEACON_BLOCK_BASE_MAX, + ), + ), + Protocol::BlocksByRoot => RpcLimits::new( + std::cmp::min( + *SIGNED_BEACON_BLOCK_ALTAIR_MIN, + *SIGNED_BEACON_BLOCK_BASE_MIN, + ), + std::cmp::max( + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, + *SIGNED_BEACON_BLOCK_BASE_MAX, + ), + ), + Protocol::Ping => RpcLimits::new( <Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(), ), Protocol::MetaData => RpcLimits::new( - <MetaData<T> as Encode>::ssz_fixed_len(), - <MetaData<T> as Encode>::ssz_fixed_len(), + <MetaDataV1<T> as Encode>::ssz_fixed_len(), + <MetaDataV2<T> as Encode>::ssz_fixed_len(), ), } } + + /// Returns `true` if the given `ProtocolId` should expect
`context_bytes` in the + /// beginning of the stream, else returns `false`. + pub fn has_context_bytes(&self) -> bool { + if self.version == Version::V2 { + match self.message_name { + Protocol::BlocksByRange | Protocol::BlocksByRoot => return true, + _ => return false, + } + } + false + } } /// An RPC protocol ID. @@ -292,8 +344,11 @@ where let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { - let ssz_snappy_codec = - BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE)); + let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new( + protocol, + MAX_RPC_SIZE, + self.fork_context.clone(), + )); InboundCodec::SSZSnappy(ssz_snappy_codec) } }; @@ -359,26 +414,25 @@ impl InboundRequest { Version::V1, Encoding::SSZSnappy, )], - InboundRequest::BlocksByRange(_) => vec![ProtocolId::new( - Protocol::BlocksByRange, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRoot(_) => vec![ProtocolId::new( - Protocol::BlocksByRoot, - Version::V1, - Encoding::SSZSnappy, - )], + InboundRequest::BlocksByRange(_) => vec![ + // V2 has higher preference when negotiating a stream + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ], + InboundRequest::BlocksByRoot(_) => vec![ + // V2 has higher preference when negotiating a stream + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], InboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, Encoding::SSZSnappy, )], - InboundRequest::MetaData(_) => vec![ProtocolId::new( - Protocol::MetaData, - Version::V1, - Encoding::SSZSnappy, - )], + InboundRequest::MetaData(_) => vec![ + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ], } } @@ -424,8 +478,6 @@ impl InboundRequest { } } -/* RPC Response type - used for outbound upgrades */ - /// Error in RPC Encoding/Decoding. 
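For readers unfamiliar with `context_bytes`: on V2 streams each response chunk is prefixed with the 4-byte fork digest, which tells the decoder which fork's SSZ type the payload uses. A rough, self-contained sketch of the dispatch (the real codec additionally handles snappy framing and length prefixes; `ForkDigests` here is a hypothetical stand-in for `ForkContext`):

use std::collections::HashMap;
use std::convert::TryInto;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ForkName {
    Base,
    Altair,
}

/// Simplified stand-in for `ForkContext`: a digest -> fork lookup.
struct ForkDigests(HashMap<[u8; 4], ForkName>);

impl ForkDigests {
    fn from_context_bytes(&self, digest: [u8; 4]) -> Option<ForkName> {
        self.0.get(&digest).copied()
    }
}

/// Decode one v2 response chunk: read the context bytes, then pick the
/// block type belonging to that fork for the remaining payload.
fn decode_chunk(ctx: &ForkDigests, chunk: &[u8]) -> Result<ForkName, String> {
    let digest: [u8; 4] = chunk
        .get(..4)
        .and_then(|b| b.try_into().ok())
        .ok_or_else(|| "chunk shorter than context bytes".to_string())?;
    ctx.from_context_bytes(digest)
        .ok_or_else(|| format!("unknown fork digest {:?}", digest))
    // A real codec would now SSZ-decode &chunk[4..] as the fork's block type.
}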
#[derive(Debug, Clone, PartialEq, AsStaticStr)] #[strum(serialize_all = "snake_case")] diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index f19e6ffe6..c5291bbdf 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -3,8 +3,10 @@ use crate::behaviour::{ }; use crate::discovery::enr; use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, RPCResponseErrorCode, RequestId}; -use crate::types::{error, EnrBitfield, GossipKind}; +use crate::rpc::{ + GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, RequestId, +}; +use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind}; use crate::EnrExt; use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; use futures::prelude::*; @@ -25,7 +27,7 @@ use std::io::prelude::*; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec}; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR}; @@ -66,6 +68,7 @@ impl Service { config: &NetworkConfig, enr_fork_id: EnrForkId, log: &Logger, + fork_context: Arc, chain_spec: &ChainSpec, ) -> error::Result<(Arc>, Self)> { let log = log.new(o!("service"=> "libp2p")); @@ -112,9 +115,10 @@ impl Service { // Lighthouse network behaviour let behaviour = Behaviour::new( &local_keypair, - config, + config.clone(), network_globals.clone(), &log, + fork_context, chain_spec, ) .await?; @@ -547,37 +551,57 @@ fn load_or_build_metadata( network_dir: &std::path::Path, log: &slog::Logger, ) -> MetaData { - // Default metadata - let mut meta_data = MetaData { + // We load a V2 metadata version by default (regardless of current fork) + // since a V2 metadata can be converted to V1. The RPC encoder is responsible + // for sending the correct metadata version based on the negotiated protocol version. 
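The comment above relies on V1 metadata being a strict subset of V2 (V1 simply lacks `syncnets`), so a V2 value can always be down-converted on demand. A sketch of that conversion with simplified stand-ins for `MetaDataV1`/`MetaDataV2` (the real types are SSZ containers over typed bitfields):

#[derive(Clone)]
struct MetaDataV1 {
    seq_number: u64,
    attnets: [u8; 8], // 64 attestation subnet bits
}

#[derive(Clone)]
struct MetaDataV2 {
    seq_number: u64,
    attnets: [u8; 8],
    syncnets: u8, // 4 sync committee subnet bits, SSZ-padded to a byte
}

impl MetaDataV2 {
    /// Dropping `syncnets` is all it takes to answer a v1 MetaData request.
    fn to_v1(&self) -> MetaDataV1 {
        MetaDataV1 {
            seq_number: self.seq_number,
            attnets: self.attnets,
        }
    }
}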
+ let mut meta_data = MetaDataV2 { seq_number: 0, - attnets: EnrBitfield::::default(), + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), }; // Read metadata from persisted file if available let metadata_path = network_dir.join(METADATA_FILENAME); if let Ok(mut metadata_file) = File::open(metadata_path) { let mut metadata_ssz = Vec::new(); if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - match MetaData::::from_ssz_bytes(&metadata_ssz) { + // Attempt to read a MetaDataV2 version from the persisted file, + // if that fails, read MetaDataV1 + match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { Ok(persisted_metadata) => { meta_data.seq_number = persisted_metadata.seq_number; // Increment seq number if persisted attnet is not default - if persisted_metadata.attnets != meta_data.attnets { + if persisted_metadata.attnets != meta_data.attnets + || persisted_metadata.syncnets != meta_data.syncnets + { meta_data.seq_number += 1; } debug!(log, "Loaded metadata from disk"); } - Err(e) => { - debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, - ); + Err(_) => { + match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + let persisted_metadata = MetaData::V1(persisted_metadata); + // Increment seq number as the persisted metadata version is updated + meta_data.seq_number = *persisted_metadata.seq_number() + 1; + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => ?e, + ); + } + } } } } }; - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number); + // Wrap the MetaData + let meta_data = MetaData::V2(meta_data); + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); save_metadata_to_disk(network_dir, meta_data.clone(), log); meta_data } diff --git a/beacon_node/eth2_libp2p/src/types/mod.rs b/beacon_node/eth2_libp2p/src/types/mod.rs index 156e6a1d7..1d045bb38 100644 --- a/beacon_node/eth2_libp2p/src/types/mod.rs +++ b/beacon_node/eth2_libp2p/src/types/mod.rs @@ -7,13 +7,13 @@ mod topics; use types::{BitVector, EthSpec}; -#[allow(type_alias_bounds)] -pub type EnrBitfield = BitVector; +pub type EnrAttestationBitfield = BitVector<::SubnetBitfieldLength>; +pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSubnetCount>; pub type Enr = discv5::enr::Enr; pub use globals::NetworkGlobals; pub use pubsub::{PubsubMessage, SnappyTransform}; -pub use subnet::SubnetDiscovery; +pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::SyncState; -pub use topics::{subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; +pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; diff --git a/beacon_node/eth2_libp2p/src/types/pubsub.rs b/beacon_node/eth2_libp2p/src/types/pubsub.rs index f1ba98705..75ef6e8ab 100644 --- a/beacon_node/eth2_libp2p/src/types/pubsub.rs +++ b/beacon_node/eth2_libp2p/src/types/pubsub.rs @@ -7,10 +7,10 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; -use types::SubnetId; use types::{ - Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockBase, SignedVoluntaryExit, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, 
SignedBeaconBlockBase, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -27,6 +27,10 @@ pub enum PubsubMessage { ProposerSlashing(Box), /// Gossipsub message providing notification of a new attester slashing. AttesterSlashing(Box>), + /// Gossipsub message providing notification of partially aggregated sync committee signatures. + SignedContributionAndProof(Box>), + /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. + SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), } // Implements the `DataTransform` trait of gossipsub to employ snappy compression @@ -107,6 +111,8 @@ impl PubsubMessage { PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit, PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing, PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, + PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, + PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), } } @@ -114,7 +120,11 @@ impl PubsubMessage { /* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will * need to be modified. */ - pub fn decode(topic: &TopicHash, data: &[u8]) -> Result { + pub fn decode( + topic: &TopicHash, + data: &[u8], + fork_context: &ForkContext, + ) -> Result { match GossipTopic::decode(topic.as_str()) { Err(_) => Err(format!("Unknown gossipsub topic: {:?}", topic)), Ok(gossip_topic) => { @@ -141,11 +151,23 @@ impl PubsubMessage { )))) } GossipKind::BeaconBlock => { - // FIXME(altair): support Altair blocks - let beacon_block = SignedBeaconBlock::Base( - SignedBeaconBlockBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ); + let beacon_block = + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Base) => SignedBeaconBlock::::Base( + SignedBeaconBlockBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Altair) => SignedBeaconBlock::::Altair( + SignedBeaconBlockAltair::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) } GossipKind::VoluntaryExit => { @@ -163,6 +185,21 @@ impl PubsubMessage { .map_err(|e| format!("{:?}", e))?; Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing))) } + GossipKind::SignedContributionAndProof => { + let sync_aggregate = SignedContributionAndProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::SignedContributionAndProof(Box::new( + sync_aggregate, + ))) + } + GossipKind::SyncCommitteeMessage(subnet_id) => { + let sync_committee = SyncCommitteeMessage::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::SyncCommitteeMessage(Box::new(( + *subnet_id, + sync_committee, + )))) + } } } } @@ -182,6 +219,8 @@ impl PubsubMessage { PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(), PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), + PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), + PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), } } } @@ -210,6 +249,12 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::VoluntaryExit(_data) => write!(f, 
"Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"), + PubsubMessage::SignedContributionAndProof(_) => { + write!(f, "Signed Contribution and Proof") + } + PubsubMessage::SyncCommitteeMessage(data) => { + write!(f, "Sync committee message: subnet_id: {}", *data.0) + } } } } diff --git a/beacon_node/eth2_libp2p/src/types/subnet.rs b/beacon_node/eth2_libp2p/src/types/subnet.rs index 847a63b60..50d28542b 100644 --- a/beacon_node/eth2_libp2p/src/types/subnet.rs +++ b/beacon_node/eth2_libp2p/src/types/subnet.rs @@ -1,9 +1,28 @@ +use serde::Serialize; use std::time::Instant; -use types::SubnetId; +use types::{SubnetId, SyncSubnetId}; + +/// Represents a subnet on an attestation or sync committee `SubnetId`. +/// +/// Used for subscribing to the appropriate gossipsub subnets and mark +/// appropriate metadata bitfields. +#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, Hash)] +pub enum Subnet { + /// Represents a gossipsub attestation subnet and the metadata `attnets` field. + Attestation(SubnetId), + /// Represents a gossipsub sync committee subnet and the metadata `syncnets` field. + SyncCommittee(SyncSubnetId), +} /// A subnet to discover peers on along with the instant after which it's no longer useful. #[derive(Debug, Clone)] pub struct SubnetDiscovery { - pub subnet_id: SubnetId, + pub subnet: Subnet, pub min_ttl: Option, } + +impl PartialEq for SubnetDiscovery { + fn eq(&self, other: &SubnetDiscovery) -> bool { + self.subnet.eq(&other.subnet) + } +} diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index 6bacfcf38..f9860a003 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -1,7 +1,9 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; use serde_derive::{Deserialize, Serialize}; use strum::AsRefStr; -use types::SubnetId; +use types::{SubnetId, SyncSubnetId}; + +use crate::Subnet; /// The gossipsub topic names. // These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX @@ -14,13 +16,16 @@ pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; +pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; +pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; -pub const CORE_TOPICS: [GossipKind; 5] = [ +pub const CORE_TOPICS: [GossipKind; 6] = [ GossipKind::BeaconBlock, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, GossipKind::AttesterSlashing, + GossipKind::SignedContributionAndProof, ]; /// A gossipsub topic which encapsulates the type of messages that should be sent and received over @@ -30,7 +35,7 @@ pub struct GossipTopic { /// The encoding of the topic. encoding: GossipEncoding, /// The fork digest of the topic, - fork_digest: [u8; 4], + pub fork_digest: [u8; 4], /// The kind of topic. kind: GossipKind, } @@ -53,12 +58,20 @@ pub enum GossipKind { ProposerSlashing, /// Topic for publishing attester slashings. AttesterSlashing, + /// Topic for publishing partially aggregated sync committee signatures. + SignedContributionAndProof, + /// Topic for publishing unaggregated sync committee signatures on a particular subnet. 
+ #[strum(serialize = "sync_committee")] + SyncCommitteeMessage(SyncSubnetId), } impl std::fmt::Display for GossipKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { GossipKind::Attestation(subnet_id) => write!(f, "beacon_attestation_{}", **subnet_id), + GossipKind::SyncCommitteeMessage(subnet_id) => { + write!(f, "sync_committee_{}", **subnet_id) + } x => f.write_str(x.as_ref()), } } @@ -124,11 +137,15 @@ impl GossipTopic { let kind = match topic_parts[3] { BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, + SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof, VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, topic => match committee_topic_index(topic) { - Some(subnet_id) => GossipKind::Attestation(subnet_id), + Some(subnet) => match subnet { + Subnet::Attestation(s) => GossipKind::Attestation(s), + Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), + }, None => return Err(format!("Unknown topic: {}", topic)), }, }; @@ -163,6 +180,10 @@ impl From for String { GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), + GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), + GossipKind::SyncCommitteeMessage(index) => { + format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) + } }; format!( "/{}/{}/{}/{}", @@ -174,32 +195,72 @@ impl From for String { } } -impl From for GossipKind { - fn from(subnet_id: SubnetId) -> Self { - GossipKind::Attestation(subnet_id) +impl std::fmt::Display for GossipTopic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let encoding = match self.encoding { + GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, + }; + + let kind = match self.kind { + GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), + GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), + GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), + GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), + GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), + GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), + GossipKind::SyncCommitteeMessage(index) => { + format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) + } + }; + write!( + f, + "/{}/{}/{}/{}", + TOPIC_PREFIX, + hex::encode(self.fork_digest), + kind, + encoding + ) + } +} + +impl From for GossipKind { + fn from(subnet_id: Subnet) -> Self { + match subnet_id { + Subnet::Attestation(s) => GossipKind::Attestation(s), + Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), + } } } // helper functions /// Get subnet id from an attestation subnet topic hash. 
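All of these topic conversions follow the `/eth2/{fork_digest}/{kind}/{encoding}` layout, so the same message kind maps to a distinct topic per fork. A small usage sketch of that layout with plain string handling (assumes the `hex` crate, which this patch already uses; the digest value is taken from the tests below):

fn topic_string(fork_digest: [u8; 4], kind: &str) -> String {
    // Matches the `/eth2/{fork_digest}/{kind}/{encoding}` layout above.
    format!("/eth2/{}/{}/ssz_snappy", hex::encode(fork_digest), kind)
}

fn main() {
    let topic = topic_string([0xe1, 0x92, 0x5f, 0x3b], "sync_committee_42");
    assert_eq!(topic, "/eth2/e1925f3b/sync_committee_42/ssz_snappy");
}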
-pub fn subnet_id_from_topic_hash(topic_hash: &TopicHash) -> Option { +pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option { let gossip_topic = GossipTopic::decode(topic_hash.as_str()).ok()?; - if let GossipKind::Attestation(subnet_id) = gossip_topic.kind() { - return Some(*subnet_id); + match gossip_topic.kind() { + GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)), + GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)), + _ => None, } - None } -// Determines if a string is a committee topic. -fn committee_topic_index(topic: &str) -> Option { +// Determines if a string is an attestation or sync committee topic. +fn committee_topic_index(topic: &str) -> Option { if topic.starts_with(BEACON_ATTESTATION_PREFIX) { - return Some(SubnetId::new( + return Some(Subnet::Attestation(SubnetId::new( topic .trim_start_matches(BEACON_ATTESTATION_PREFIX) .parse::() .ok()?, - )); + ))); + } else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) { + return Some(Subnet::SyncCommittee(SyncSubnetId::new( + topic + .trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC) + .parse::() + .ok()?, + ))); } None } @@ -222,7 +283,9 @@ mod tests { for kind in [ BeaconBlock, BeaconAggregateAndProof, + SignedContributionAndProof, Attestation(SubnetId::new(42)), + SyncCommitteeMessage(SyncSubnetId::new(42)), VoluntaryExit, ProposerSlashing, AttesterSlashing, @@ -292,14 +355,20 @@ mod tests { } #[test] - fn test_subnet_id_from_topic_hash() { + fn test_subnet_from_topic_hash() { let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_block/ssz_snappy"); - assert!(subnet_id_from_topic_hash(&topic_hash).is_none()); + assert!(subnet_from_topic_hash(&topic_hash).is_none()); let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_attestation_42/ssz_snappy"); assert_eq!( - subnet_id_from_topic_hash(&topic_hash), - Some(SubnetId::new(42)) + subnet_from_topic_hash(&topic_hash), + Some(Subnet::Attestation(SubnetId::new(42))) + ); + + let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/sync_committee_42/ssz_snappy"); + assert_eq!( + subnet_from_topic_hash(&topic_hash), + Some(Subnet::SyncCommittee(SyncSubnetId::new(42))) ); } @@ -314,6 +383,11 @@ mod tests { "beacon_attestation", Attestation(SubnetId::new(42)).as_ref() ); + + assert_eq!( + "sync_committee", + SyncCommitteeMessage(SyncSubnetId::new(42)).as_ref() + ); assert_eq!("voluntary_exit", VoluntaryExit.as_ref()); assert_eq!("proposer_slashing", ProposerSlashing.as_ref()); assert_eq!("attester_slashing", AttesterSlashing.as_ref()); diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index 1f6062428..8c28512d0 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -7,14 +7,20 @@ use eth2_libp2p::{Libp2pEvent, NetworkConfig}; use libp2p::gossipsub::GossipsubConfigBuilder; use slog::{debug, error, o, Drain}; use std::net::{TcpListener, UdpSocket}; +use std::sync::Arc; use std::sync::Weak; use std::time::Duration; use tokio::runtime::Runtime; -use types::{ChainSpec, EnrForkId, MinimalEthSpec}; +use types::{ChainSpec, EnrForkId, ForkContext, Hash256, MinimalEthSpec}; type E = MinimalEthSpec; use tempfile::Builder as TempBuilder; +/// Returns a dummy fork context +fn fork_context() -> ForkContext { + ForkContext::new::(types::Slot::new(0), Hash256::zero(), &ChainSpec::minimal()) +} + pub struct Libp2pInstance(LibP2PService, exit_future::Signal); impl std::ops::Deref for Libp2pInstance { @@ -109,12 +115,14 
@@ pub async fn build_libp2p_instance( let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); + let fork_context = Arc::new(fork_context()); Libp2pInstance( LibP2PService::new( executor, &config, EnrForkId::default(), &log, + fork_context, &ChainSpec::minimal(), ) .await diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index d621bf31c..9d1faf748 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -8,7 +8,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, EthSpec, Hash256, MinimalEthSpec, + Signature, SignedBeaconBlock, Slot, }; mod common; @@ -500,9 +501,13 @@ fn test_blocks_by_root_chunked_rpc() { }); // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); + let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + + let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); + let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -525,7 +530,11 @@ fn test_blocks_by_root_chunked_rpc() { response, }) => match response { Response::BlocksByRoot(Some(_)) => { - assert_eq!(response, rpc_response.clone()); + if messages_received < 5 { + assert_eq!(response, rpc_response_base.clone()); + } else { + assert_eq!(response, rpc_response_altair.clone()); + } messages_received += 1; debug!(log, "Chunk received"); } @@ -555,11 +564,18 @@ fn test_blocks_by_root_chunked_rpc() { // send the response debug!(log, "Receiver got request"); - for _ in 1..=messages_to_send { + for i in 0..messages_to_send { + // Send first half of responses as base blocks and + // second half as altair blocks. 
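+ // (Each chunk on a v2 BlocksByRoot stream carries its own context bytes,
+ // so interleaving Base and Altair blocks exercises the codec's per-chunk
+ // fork dispatch rather than a single fixed block type.)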
+ let rpc_response = if i < 5 { + rpc_response_base.clone() + } else { + rpc_response_altair.clone() + }; receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, - rpc_response.clone(), + rpc_response, ); debug!(log, "Sending message"); } @@ -621,7 +637,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { }); // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); + let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 998b95bea..a652484e7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1380,23 +1380,27 @@ pub fn serve( let enr = network_globals.local_enr(); let p2p_addresses = enr.multiaddr_p2p_tcp(); let discovery_addresses = enr.multiaddr_p2p_udp(); + let meta_data = network_globals.local_metadata.read(); Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), enr, p2p_addresses, discovery_addresses, metadata: api_types::MetaData { - seq_number: network_globals.local_metadata.read().seq_number, + seq_number: *meta_data.seq_number(), attnets: format!( + "0x{}", + hex::encode(meta_data.attnets().clone().into_bytes()), + ), + syncnets: format!( "0x{}", hex::encode( - network_globals - .local_metadata - .read() - .attnets - .clone() + meta_data + .syncnets() + .map(|x| x.clone()) + .unwrap_or_default() .into_bytes() - ), + ) ), }, })) @@ -1896,7 +1900,7 @@ pub fn serve( publish_network_message( &network_tx, - NetworkMessage::Subscribe { + NetworkMessage::AttestationSubscribe { subscriptions: vec![subscription], }, )?; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 470afbf09..c8908b77c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,8 +11,8 @@ use eth2::StatusCode; use eth2::{types::*, BeaconNodeHttpClient, Timeouts}; use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; use eth2_libp2p::{ - rpc::methods::MetaData, - types::{EnrBitfield, SyncState}, + rpc::methods::{MetaData, MetaDataV2}, + types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, Enr, EnrExt, NetworkGlobals, PeerId, }; use futures::stream::{Stream, StreamExt}; @@ -162,10 +162,11 @@ impl ApiTester { let log = null_logger().unwrap(); // Default metadata - let meta_data = MetaData { + let meta_data = MetaData::V2(MetaDataV2 { seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }); let enr_key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let enr_clone = enr.clone(); @@ -277,10 +278,11 @@ impl ApiTester { let log = null_logger().unwrap(); // Default metadata - let meta_data = MetaData { + let meta_data = MetaData::V2(MetaDataV2 { seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }); let enr_key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let enr_clone = enr.clone(); @@ -1338,6 +1340,7 @@ impl ApiTester { metadata: eth2::types::MetaData { seq_number: 0, attnets: "0x0000000000000000".to_string(), + 
syncnets: "0x00".to_string(), }, }; diff --git a/beacon_node/network/src/attestation_service/tests/mod.rs b/beacon_node/network/src/attestation_service/tests/mod.rs deleted file mode 100644 index 55811a191..000000000 --- a/beacon_node/network/src/attestation_service/tests/mod.rs +++ /dev/null @@ -1,429 +0,0 @@ -use super::*; -use beacon_chain::{ - builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, -}; -use futures::Stream; -use genesis::{generate_deterministic_keypairs, interop_genesis_state}; -use lazy_static::lazy_static; -use matches::assert_matches; -use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::{SlotClock, SystemTimeSlotClock}; -use std::time::{Duration, SystemTime}; -use store::config::StoreConfig; -use store::{HotColdDB, MemoryStore}; -use types::{CommitteeIndex, EthSpec, MinimalEthSpec}; - -const SLOT_DURATION_MILLIS: u64 = 400; - -type TestBeaconChainType = Witness< - SystemTimeSlotClock, - CachingEth1Backend, - MinimalEthSpec, - MemoryStore, - MemoryStore, ->; - -pub struct TestBeaconChain { - chain: Arc>, -} - -impl TestBeaconChain { - pub fn new_with_system_clock() -> Self { - let spec = MinimalEthSpec::default_spec(); - - let keypairs = generate_deterministic_keypairs(1); - - let log = get_logger(); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); - - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - - let chain = Arc::new( - BeaconChainBuilder::new(MinimalEthSpec) - .logger(log.clone()) - .custom_spec(spec.clone()) - .store(Arc::new(store)) - .genesis_state( - interop_genesis_state::(&keypairs, 0, &spec) - .expect("should generate interop state"), - ) - .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(SystemTimeSlotClock::new( - Slot::new(0), - Duration::from_secs(recent_genesis_time()), - Duration::from_millis(SLOT_DURATION_MILLIS), - )) - .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], log) - .build() - .expect("should build"), - ); - Self { chain } - } -} - -pub fn recent_genesis_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() -} - -fn get_logger() -> Logger { - NullLoggerBuilder.build().expect("logger should build") -} - -lazy_static! 
{ - static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock(); -} - -fn get_attestation_service() -> AttestationService { - let log = get_logger(); - let config = NetworkConfig::default(); - - let beacon_chain = CHAIN.chain.clone(); - - AttestationService::new(beacon_chain, &config, &log) -} - -fn get_subscription( - validator_index: u64, - attestation_committee_index: CommitteeIndex, - slot: Slot, - committee_count_at_slot: u64, -) -> ValidatorSubscription { - let is_aggregator = true; - ValidatorSubscription { - validator_index, - attestation_committee_index, - slot, - committee_count_at_slot, - is_aggregator, - } -} - -fn get_subscriptions( - validator_count: u64, - slot: Slot, - committee_count_at_slot: u64, -) -> Vec { - (0..validator_count) - .map(|validator_index| { - get_subscription( - validator_index, - validator_index, - slot, - committee_count_at_slot, - ) - }) - .collect() -} - -// gets a number of events from the subscription service, or returns none if it times out after a number -// of slots -async fn get_events + Unpin>( - stream: &mut S, - num_events: Option, - num_slots_before_timeout: u32, -) -> Vec { - let mut events = Vec::new(); - - let collect_stream_fut = async { - loop { - if let Some(result) = stream.next().await { - events.push(result); - if let Some(num) = num_events { - if events.len() == num { - return; - } - } - } - } - }; - - tokio::select! { - _ = collect_stream_fut => {events} - _ = tokio::time::sleep( - Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout, - ) => { events } - } -} - -#[tokio::test] -async fn subscribe_current_slot_wait_for_unsubscribe() { - // subscription config - let validator_index = 1; - let committee_index = 1; - // Keep a low subscription slot so that there are no additional subnet discovery events. - let subscription_slot = 0; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - committee_count, - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // not enough time for peer discovery, just subscribe, unsubscribe - let subnet_id = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot), - committee_index, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - let expected = vec![ - AttServiceMessage::Subscribe(subnet_id), - AttServiceMessage::Unsubscribe(subnet_id), - ]; - - // Wait for 1 slot duration to get the unsubscription event - let events = get_events(&mut attestation_service, None, 1).await; - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - - // If the long lived and short lived subnets are the same, there should be no more events - // as we don't resubscribe already subscribed subnets. - if !attestation_service.random_subnets.contains(&subnet_id) { - assert_eq!(expected[..], events[3..]); - } - // Should be subscribed to only 1 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), 1); -} - -/// Test to verify that we are not unsubscribing to a subnet before a required subscription. 
-#[tokio::test] -async fn test_same_subnet_unsubscription() { - // subscription config - let validator_index = 1; - let committee_count = 1; - - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). - let subscription_slot1 = 0; - let subscription_slot2 = 1; - let com1 = 1; - let com2 = 0; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let sub1 = get_subscription( - validator_index, - com1, - current_slot + Slot::new(subscription_slot1), - committee_count, - ); - - let sub2 = get_subscription( - validator_index, - com2, - current_slot + Slot::new(subscription_slot2), - committee_count, - ); - - let subnet_id1 = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot1), - com1, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - - let subnet_id2 = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot2), - com2, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - - // Assert that subscriptions are different but their subnet is the same - assert_ne!(sub1, sub2); - assert_eq!(subnet_id1, subnet_id2); - - // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2]) - .unwrap(); - - // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) - // Get all events for 1 slot duration (unsubscription event should happen after 2 slot durations). - let events = get_events(&mut attestation_service, None, 1).await; - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - - let expected = AttServiceMessage::Subscribe(subnet_id1); - - // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. - if !attestation_service.random_subnets.contains(&subnet_id1) { - assert_eq!(expected, events[3]); - assert_eq!(attestation_service.subscription_count(), 2); - } else { - assert_eq!(attestation_service.subscription_count(), 1); - } - - // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; - - // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service.random_subnets.contains(&subnet_id1) { - assert_eq!( - [AttServiceMessage::Unsubscribe(subnet_id1)], - unsubscribe_event[..] - ); - } - - // Should be subscribed to only 1 long lived subnet after unsubscription. 
- assert_eq!(attestation_service.subscription_count(), 1); -} - -#[tokio::test] -async fn subscribe_all_random_subnets() { - let attestation_subnet_count = MinimalEthSpec::default_spec().attestation_subnet_count; - let subscription_slot = 10; - let subscription_count = attestation_subnet_count; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = get_subscriptions( - subscription_count, - current_slot + subscription_slot, - committee_count, - ); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let events = get_events(&mut attestation_service, None, 3).await; - let mut discover_peer_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in &events { - match event { - AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - AttServiceMessage::Subscribe(_any_subnet) => {} - AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, - } - } - - // The bulk discovery request length should be equal to validator_count - let bulk_discovery_event = events.last().unwrap(); - if let AttServiceMessage::DiscoverPeers(d) = bulk_discovery_event { - assert_eq!(d.len(), attestation_subnet_count as usize); - } else { - panic!("Unexpected event {:?}", bulk_discovery_event); - } - - // 64 `DiscoverPeer` requests of length 1 corresponding to random subnets - // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery. - assert_eq!(discover_peer_count, subscription_count + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); - assert_eq!(unexpected_msg_count, 0); - // test completed successfully -} - -#[tokio::test] -async fn subscribe_all_random_subnets_plus_one() { - let attestation_subnet_count = MinimalEthSpec::default_spec().attestation_subnet_count; - let subscription_slot = 10; - // the 65th subscription should result in no more messages than the previous scenario - let subscription_count = attestation_subnet_count + 1; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = get_subscriptions( - subscription_count, - current_slot + subscription_slot, - committee_count, - ); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let events = get_events(&mut attestation_service, None, 3).await; - let mut discover_peer_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in &events { - match event { - AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - AttServiceMessage::Subscribe(_any_subnet) => {} - AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, - } - } - - // The bulk discovery request length shouldn't exceed max attestation_subnet_count - let bulk_discovery_event = events.last().unwrap(); - if let AttServiceMessage::DiscoverPeers(d) = bulk_discovery_event { - assert_eq!(d.len(), attestation_subnet_count as usize); - } else { - panic!("Unexpected event {:?}", bulk_discovery_event); - } - // 64 
`DiscoverPeer` requests of length 1 corresponding to random subnets - // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. - // For the 65th subscription, the call to `subscribe_to_random_subnets` is not made because we are at capacity. - assert_eq!(discover_peer_count, 64 + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); - assert_eq!(unexpected_msg_count, 0); -} diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index fb8fbc9dc..91f20e68c 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -57,7 +57,8 @@ use task_executor::TaskExecutor; use tokio::sync::{mpsc, oneshot}; use types::{ Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReadyWork, @@ -121,6 +122,14 @@ const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; /// before we start dropping them. const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; +/// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping +/// them. +const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; + +/// The maximum number of queued `SignedContributionAndProof` objects that will be stored before we +/// start dropping them. +const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; + /// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; @@ -160,6 +169,8 @@ pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; +pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; +pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -327,6 +338,44 @@ impl WorkEvent { } } + /// Create a new `Work` event for some sync committee signature. + pub fn gossip_sync_signature( + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature: Box::new(sync_signature), + subnet_id, + seen_timestamp, + }, + } + } + + /// Create a new `Work` event for some sync committee contribution. + pub fn gossip_sync_contribution( + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution: Box::new(sync_contribution), + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some exit. 
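A note on the queue discipline chosen for the two new bounded queues above: LIFO keeps the freshest messages and sheds the oldest once full, which suits time-sensitive gossip. A minimal sketch of such a queue (illustrative; Lighthouse's `LifoQueue` is its own implementation):

use std::collections::VecDeque;

/// Bounded LIFO: push to the front, pop from the front, drop from the back.
struct LifoQueue<T> {
    items: VecDeque<T>,
    cap: usize,
}

impl<T> LifoQueue<T> {
    fn new(cap: usize) -> Self {
        Self { items: VecDeque::new(), cap }
    }
    fn push(&mut self, item: T) {
        if self.items.len() == self.cap {
            // At capacity: the oldest (least useful) message is dropped.
            self.items.pop_back();
        }
        self.items.push_front(item);
    }
    fn pop(&mut self) -> Option<T> {
        self.items.pop_front()
    }
}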
pub fn gossip_voluntary_exit( message_id: MessageId, @@ -553,6 +602,19 @@ pub enum Work<T: BeaconChainTypes> { peer_id: PeerId, attester_slashing: Box<AttesterSlashing<T::EthSpec>>, }, + GossipSyncSignature { + message_id: MessageId, + peer_id: PeerId, + sync_signature: Box<SyncCommitteeMessage>, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + }, + GossipSyncContribution { + message_id: MessageId, + peer_id: PeerId, + sync_contribution: Box<SignedContributionAndProof<T::EthSpec>>, + seen_timestamp: Duration, + }, RpcBlock { block: Box<SignedBeaconBlock<T::EthSpec>>, result_tx: BlockResultSender<T::EthSpec>, @@ -588,6 +650,8 @@ impl<T: BeaconChainTypes> Work<T> { Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, + Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, + Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, Work::RpcBlock { .. } => RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, @@ -730,6 +794,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { let mut unknown_block_attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); + let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); + // Using a FIFO queue for voluntary exits since it prevents exit censoring. I don't have // a strong feeling about queue type for exits. let mut gossip_voluntary_exit_queue = FifoQueue::new(MAX_GOSSIP_EXIT_QUEUE_LEN); @@ -859,6 +926,12 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { self.spawn_worker(item, toolbox); } else if let Some(item) = attestation_queue.pop() { self.spawn_worker(item, toolbox); + // Check sync committee messages after attestations as their rewards are lower + // and they don't influence fork choice. + } else if let Some(item) = sync_contribution_queue.pop() { + self.spawn_worker(item, toolbox); + } else if let Some(item) = sync_message_queue.pop() { + self.spawn_worker(item, toolbox); // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { @@ -952,6 +1025,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { Work::GossipAttesterSlashing { .. } => { gossip_attester_slashing_queue.push(work, work_id, &self.log) } + Work::GossipSyncSignature { .. } => sync_message_queue.push(work), + Work::GossipSyncContribution { .. } => { + sync_contribution_queue.push(work) + } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), Work::ChainSegment { .. } => { chain_segment_queue.push(work, work_id, &self.log) @@ -985,6 +1062,14 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL, aggregate_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL, + sync_message_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL, + sync_contribution_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, @@ -1188,6 +1273,36 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { peer_id, *attester_slashing, ), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ), + /* + * Sync contribution verification.
+ */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ), /* * Verification for beacon blocks received during syncing via RPC. */ diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 617b9a871..30cc17242 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,8 +9,12 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use environment::{null_logger, Environment, EnvironmentBuilder}; -use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; -use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId}; +use eth2_libp2p::{ + discv5::enr::{CombinedKey, EnrBuilder}, + rpc::methods::{MetaData, MetaDataV2}, + types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, + MessageId, NetworkGlobals, PeerId, +}; use slot_clock::SlotClock; use std::cmp; use std::iter::Iterator; @@ -163,10 +167,11 @@ impl TestRig { let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata - let meta_data = MetaData { + let meta_data = MetaData::V2(MetaDataV2 { seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }); let enr_key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 0040a996f..4c71e3ca0 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,6 +3,7 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::{ attestation_verification::{Error as AttnError, SignatureVerifiedAttestation}, observed_operations::ObservationOutcome, + sync_committee_verification::Error as SyncCommitteeError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, }; @@ -14,7 +15,8 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -688,6 +690,131 @@ impl Worker { } } + /// Process the sync committee signature received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to add it to the naive aggregation pool. + /// + /// Raises a log if there are errors. 
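Background for the worker methods below: the "naive aggregation pool" ORs individually gossiped sync committee messages into a per-(slot, block root, subnet) aggregate that a local aggregator can later publish as a contribution. A toy sketch of the insertion step (keys, sizes, and the omitted BLS signature aggregation are all simplifications):

use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
struct AggregateKey {
    slot: u64,
    block_root: [u8; 32],
    subnet_id: u64,
}

#[derive(Default)]
struct Contribution {
    aggregation_bits: u128, // one bit per committee position (toy size)
    // A real pool would also aggregate the BLS signatures here.
}

#[derive(Default)]
struct NaiveAggregationPool {
    aggregates: HashMap<AggregateKey, Contribution>,
}

impl NaiveAggregationPool {
    fn insert(&mut self, key: AggregateKey, committee_position: u32) {
        let entry = self.aggregates.entry(key).or_default();
        entry.aggregation_bits |= 1u128 << committee_position;
    }
}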
+ pub fn process_gossip_sync_committee_signature( + self, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + _seen_timestamp: Duration, + ) { + let sync_signature = match self + .chain + .verify_sync_committee_message_for_gossip(sync_signature, subnet_id) + { + Ok(sync_signature) => sync_signature, + Err(e) => { + self.handle_sync_committee_message_failure( + peer_id, + message_id, + "sync_signature", + e, + ); + return; + } + }; + + /*TODO: + // Register the sync signature with any monitored validators. + self.chain + .validator_monitor + .read() + .register_gossip_unaggregated_attestation( + seen_timestamp, + attestation.indexed_attestation(), + &self.chain.slot_clock, + ); + */ + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL); + + if let Err(e) = self + .chain + .add_to_naive_sync_aggregation_pool(sync_signature) + { + debug!( + self.log, + "Sync committee signature invalid for agg pool"; + "reason" => ?e, + "peer" => %peer_id, + ) + } + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL); + } + + /// Process the sync committee contribution received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to add it to the block inclusion pool. + /// + /// Raises a log if there are errors. + pub fn process_sync_committee_contribution( + self, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + _seen_timestamp: Duration, + ) { + let sync_contribution = match self + .chain + .verify_sync_contribution_for_gossip(sync_contribution) + { + Ok(sync_contribution) => sync_contribution, + Err(e) => { + // Report the failure to gossipsub + self.handle_sync_committee_message_failure( + peer_id, + message_id, + "sync_contribution", + e, + ); + return; + } + }; + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + /* TODO + // Register the attestation with any monitored validators. + self.chain + .validator_monitor + .read() + .register_gossip_aggregated_attestation( + seen_timestamp, + aggregate.aggregate(), + aggregate.indexed_attestation(), + &self.chain.slot_clock, + ); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL); + */ + + if let Err(e) = self + .chain + .add_contribution_to_block_inclusion_pool(sync_contribution) + { + debug!( + self.log, + "Sync contribution invalid for op pool"; + "reason" => ?e, + "peer" => %peer_id, + ) + } + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL); + } + /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. fn handle_attestation_verification_failure( @@ -740,8 +867,7 @@ impl Worker { /* * The aggregate had no signatures and is therefore worthless. * - * Whilst we don't gossip this attestation, this act is **not** a clear - * violation of the spec nor indication of fault. + * This is forbidden by the p2p spec. Reject the message. 
                 *
                 */
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
@@ -1079,4 +1205,242 @@ impl Worker {
             "type" => ?attestation_type,
         );
     }
+
+    /// Handle an error whilst verifying a `SyncCommitteeMessage` or `SignedContributionAndProof` from the
+    /// network.
+    pub fn handle_sync_committee_message_failure(
+        &self,
+        peer_id: PeerId,
+        message_id: MessageId,
+        message_type: &str,
+        error: SyncCommitteeError,
+    ) {
+        metrics::register_sync_committee_error(&error);
+
+        match &error {
+            SyncCommitteeError::FutureSlot { .. } | SyncCommitteeError::PastSlot { .. } => {
+                /*
+                 * These errors can be triggered by a mismatch between our slot and the peer.
+                 *
+                 * The peer has published an invalid consensus message, _only_ if we trust our own clock.
+                 */
+                trace!(
+                    self.log,
+                    "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots";
+                    "peer_id" => %peer_id,
+                    "type" => ?message_type,
+                );
+
+                // Peers that are slow or not to spec can spam us with these messages draining our
+                // bandwidth. We therefore penalize these peers when they do this.
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+
+                // Do not propagate these messages.
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
+            }
+            SyncCommitteeError::EmptyAggregationBitfield => {
+                /*
+                 * The aggregate had no signatures and is therefore worthless.
+                 *
+                 * This is forbidden by the p2p spec. Reject the message.
+                 */
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+            }
+            SyncCommitteeError::InvalidSelectionProof { .. }
+            | SyncCommitteeError::InvalidSignature => {
+                /*
+                 * These errors are caused by invalid signatures.
+                 *
+                 * The peer has published an invalid consensus message.
+                 */
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+            }
+            SyncCommitteeError::AggregatorNotInCommittee { .. }
+            | SyncCommitteeError::AggregatorPubkeyUnknown(_) => {
+                /*
+                 * The aggregator is not in the committee for the given `ContributionAndProof`, OR
+                 * the aggregator index was higher than any known validator index.
+                 *
+                 * The peer has published an invalid consensus message.
+                 */
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+            }
+            SyncCommitteeError::SyncContributionAlreadyKnown(_)
+            | SyncCommitteeError::AggregatorAlreadyKnown(_) => {
+                /*
+                 * The sync committee message has already been observed on the network or in
+                 * a block.
+                 *
+                 * The peer is not necessarily faulty.
+                 */
+                trace!(
+                    self.log,
+                    "Sync committee message is already known";
+                    "peer_id" => %peer_id,
+                    "type" => ?message_type,
+                );
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
+                return;
+            }
+            SyncCommitteeError::UnknownValidatorIndex(_) => {
+                /*
+                 * The aggregator index (or similar field) was higher than the maximum
+                 * possible number of validators.
+                 *
+                 * The peer has published an invalid consensus message.
+ */ + debug!( + self.log, + "Validation Index too high"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::UnknownValidatorPubkey(_) => { + debug!( + self.log, + "Validator pubkey is unknown"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::InvalidSubnetId { received, expected } => { + /* + * The sync committee message was received on an incorrect subnet id. + */ + debug!( + self.log, + "Received sync committee message on incorrect subnet"; + "expected" => ?expected, + "received" => ?received, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::Invalid(_) => { + /* + * The sync committee message failed the state_processing verification. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::PriorSyncCommitteeMessageKnown { .. } => { + /* + * We have already seen a sync committee message from this validator for this epoch. + * + * The peer is not necessarily faulty. + */ + debug!( + self.log, + "Prior sync committee message known"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + // We still penalize the peer slightly. We don't want this to be a recurring + // behaviour. + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + return; + } + SyncCommitteeError::BeaconChainError(e) => { + /* + * Lighthouse hit an unexpected error whilst processing the sync committee message. It + * should be impossible to trigger a `BeaconChainError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. + */ + error!( + self.log, + "Unable to validate sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::BeaconStateError(e) => { + /* + * Lighthouse hit an unexpected error whilst processing the sync committee message. It + * should be impossible to trigger a `BeaconStateError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. 
+ */ + error!( + self.log, + "Unable to validate sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::ContributionError(e) => { + error!( + self.log, + "Error while processing sync contribution"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::SyncCommitteeError(e) => { + error!( + self.log, + "Error while processing sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::ArithError(e) => { + /* + This would most likely imply incompatible configs or an invalid message. + */ + error!( + self.log, + "Arithematic error while processing sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::InvalidSubcommittee { .. } => { + /* + The subcommittee index is higher than `SYNC_COMMITTEE_SUBNET_COUNT`. This would imply + an invalid message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + } + debug!( + self.log, + "Invalid sync committee message from network"; + "reason" => ?error, + "peer_id" => %peer_id, + "type" => ?message_type, + ); + } } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 31bad7a34..934442e12 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -6,7 +6,6 @@ pub mod error; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod service; -mod attestation_service; mod beacon_processor; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod metrics; @@ -14,6 +13,7 @@ mod nat; mod persisted_dht; mod router; mod status; +mod subnet_service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod sync; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index bc0537e28..7ffce1254 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,4 +1,7 @@ -use beacon_chain::attestation_verification::Error as AttnError; +use beacon_chain::{ + attestation_verification::Error as AttnError, + sync_committee_verification::Error as SyncCommitteeError, +}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::{ types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash, @@ -7,7 +10,10 @@ use fnv::FnvHashMap; pub use lighthouse_metrics::*; use std::{collections::HashMap, sync::Arc}; use strum::AsStaticRef; -use types::{subnet_id::subnet_id_to_string, EthSpec}; +use types::{ + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, subnet_id::subnet_id_to_string, + sync_subnet_id::sync_subnet_id_to_string, EthSpec, +}; lazy_static! { @@ -20,15 +26,27 @@ lazy_static! 
{ &["protocol"] ); - pub static ref GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_subnets", - "Subnets currently subscribed to", + pub static ref GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_subscribed_attestation_subnets", + "Attestation subnets currently subscribed to", &["subnet"] ); - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_subnet_topic_count", - "Peers subscribed per subnet topic", + pub static ref GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_subscribed_sync_subnets", + "Sync subnets currently subscribed to", + &["subnet"] + ); + + pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_peers_per_attestation_subnet_topic_count", + "Peers subscribed per attestation subnet topic", + &["subnet"] + ); + + pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_peers_per_sync_subnet_topic_count", + "Peers subscribed per sync subnet topic", &["subnet"] ); @@ -38,7 +56,13 @@ lazy_static! { &["topic_hash"] ); - pub static ref MESH_PEERS_PER_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + pub static ref MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( + "gossipsub_mesh_peers_per_subnet_topic", + "Mesh peers per subnet topic", + &["subnet"] + ); + + pub static ref MESH_PEERS_PER_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( "gossipsub_mesh_peers_per_subnet_topic", "Mesh peers per subnet topic", &["subnet"] @@ -50,9 +74,15 @@ lazy_static! { &["topic_hash"] ); - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_subnet_topic", - "Average peer's score per subnet topic", + pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_float_gauge_vec( + "gossipsub_avg_peer_score_per_attestation_subnet_topic", + "Average peer's score per attestation subnet topic", + &["subnet"] + ); + + pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC: Result = try_create_float_gauge_vec( + "gossipsub_avg_peer_score_per_sync_subnet_topic", + "Average peer's score per sync committee subnet topic", &["subnet"] ); @@ -133,6 +163,14 @@ lazy_static! { "gossipsub_aggregated_attestations_rx_total", "Count of gossip aggregated attestations received" ); + pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_RX: Result = try_create_int_counter( + "gossipsub_sync_committee_message_rx_total", + "Count of gossip sync committee messages received" + ); + pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX: Result = try_create_int_counter( + "gossipsub_sync_committee_contribution_received_total", + "Count of gossip sync committee contributions received" + ); /* @@ -150,19 +188,35 @@ lazy_static! 
{ "gossipsub_aggregated_attestations_tx_total", "Count of gossip aggregated attestations transmitted" ); + pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_TX: Result = try_create_int_counter( + "gossipsub_sync_committee_message_tx_total", + "Count of gossip sync committee messages transmitted" + ); + pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX: Result = try_create_int_counter( + "gossipsub_sync_committee_contribution_tx_total", + "Count of gossip sync committee contributions transmitted" + ); /* * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_subnet_subscriptions_total", - "Count of validator subscription requests." + "gossipsub_attestation_subnet_subscriptions_total", + "Count of validator attestation subscription requests." ); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( "gossipsub_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." ); + /* + * Sync committee subnet subscriptions + */ + pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( + "gossipsub_sync_committee_subnet_subscriptions_total", + "Count of validator sync committee subscription requests." + ); + /* * Gossip processor */ @@ -322,6 +376,33 @@ lazy_static! { "beacon_processor_aggregated_attestation_requeued_total", "Total number of aggregated attestations that referenced an unknown block and were re-queued." ); + // Sync committee messages. + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_message_queue_total", + "Count of sync committee messages waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_message_verified_total", + "Total number of sync committee messages verified for gossip." + ); + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_message_imported_total", + "Total number of sync committee messages imported to fork choice, etc." + ); + // Sync contribution. + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_contribution_queue_total", + "Count of sync committee contributions waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_contribution_verified_total", + "Total number of sync committee contributions verified for gossip." + ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_contribution_imported_total", + "Total number of sync committee contributions imported to fork choice, etc." + ); + } lazy_static! { @@ -331,6 +412,12 @@ lazy_static! 
{ "Gossipsub attestation errors per error type", &["type"] ); + pub static ref GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_sync_committee_errors_per_type", + "Gossipsub sync_committee errors per error type", + &["type"] + ); pub static ref INBOUND_LIBP2P_BYTES: Result = try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( @@ -402,6 +489,10 @@ pub fn register_attestation_error(error: &AttnError) { inc_counter_vec(&GOSSIP_ATTESTATION_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub fn register_sync_committee_error(error: &SyncCommitteeError) { + inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + /// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. pub fn expose_publish_metrics(messages: &[PubsubMessage]) { for message in messages { @@ -417,6 +508,12 @@ pub fn expose_publish_metrics(messages: &[PubsubMessage]) { PubsubMessage::AggregateAndProofAttestation(_) => { inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) } + PubsubMessage::SyncCommitteeMessage(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_TX) + } + PubsubMessage::SignedContributionAndProof(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX) + } _ => {} } } @@ -430,6 +527,10 @@ pub fn expose_receive_metrics(message: &PubsubMessage) { PubsubMessage::AggregateAndProofAttestation(_) => { inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) } + PubsubMessage::SyncCommitteeMessage(_) => inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_RX), + PubsubMessage::SignedContributionAndProof(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX) + } _ => {} } } @@ -447,7 +548,10 @@ pub fn update_gossip_metrics( let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC .as_ref() .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC .as_ref() .map(|gauge| gauge.reset()); @@ -478,30 +582,50 @@ pub fn update_gossip_metrics( // reset the mesh peers, showing all subnets for subnet_id in 0..T::default_spec().attestation_subnet_count { let _ = get_int_gauge( - &MESH_PEERS_PER_SUBNET_TOPIC, + &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); } + for subnet_id in 0..SYNC_COMMITTEE_SUBNET_COUNT { + let _ = get_int_gauge( + &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + } + // Subnet topics subscribed to for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { if let GossipKind::Attestation(subnet_id) = topic.kind() { let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + 
&GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) .map(|v| v.set(1)); @@ -519,7 +643,7 @@ pub fn update_gossip_metrics( match topic.kind() { GossipKind::Attestation(subnet_id) => { if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.inc() @@ -528,13 +652,31 @@ pub fn update_gossip_metrics( // average peer scores if let Some(score) = gossipsub.peer_score(peer_id) { if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.add(score) }; } } + GossipKind::SyncCommitteeMessage(subnet_id) => { + if let Some(v) = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.inc() + }; + + // average peer scores + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.add(score) + }; + } + } kind => { // main topics if let Some(score) = gossipsub.peer_score(peer_id) { @@ -557,12 +699,21 @@ pub fn update_gossip_metrics( GossipKind::Attestation(subnet_id) => { // average peer scores if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.set(v.get() / (*peers as f64)) }; } + GossipKind::SyncCommitteeMessage(subnet_id) => { + // average peer scores + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.set(v.get() / (*peers as f64)) + }; + } kind => { // main topics if let Some(v) = @@ -582,12 +733,20 @@ pub fn update_gossip_metrics( match topic.kind() { GossipKind::Attestation(subnet_id) => { if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_SUBNET_TOPIC, + &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.set(peers as i64) }; } + GossipKind::SyncCommitteeMessage(subnet_id) => { + if let Some(v) = get_int_gauge( + &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.set(peers as i64) + }; + } kind => { // main topics if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 8d9b1cd62..5096a4bdc 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -247,6 +247,31 @@ impl Router { self.processor .on_attester_slashing_gossip(id, peer_id, attester_slashing); } + PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { + trace!( + self.log, + "Received sync committee aggregate"; + "peer_id" => %peer_id + ); + self.processor.on_sync_committee_contribution_gossip( + id, + peer_id, + *contribution_and_proof, + ); + } + PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { + trace!( + self.log, + "Received sync committee signature"; + "peer_id" => %peer_id + ); + self.processor.on_sync_committee_signature_gossip( + id, + peer_id, + sync_committtee_msg.1, + sync_committtee_msg.0, + ); + } } } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 590d52da6..103ab85dc 100644 --- 
a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -10,10 +10,11 @@ use slog::{debug, error, o, trace, warn}; use std::cmp; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, ChainSpec, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. It relays necessary data to the syncing thread @@ -309,6 +310,36 @@ impl Processor { )) } + pub fn on_sync_committee_signature_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( + message_id, + peer_id, + sync_signature, + subnet_id, + timestamp_now(), + )) + } + + pub fn on_sync_committee_contribution_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( + message_id, + peer_id, + sync_contribution, + timestamp_now(), + )) + } + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { self.beacon_processor_send .try_send(work) @@ -328,10 +359,7 @@ pub(crate) fn status_message( beacon_chain: &BeaconChain, ) -> Result { let head_info = beacon_chain.head_info()?; - let genesis_validators_root = beacon_chain.genesis_validators_root; - - let fork_digest = - ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); + let fork_digest = beacon_chain.enr_fork_id().fork_digest; Ok(StatusMessage { fork_digest, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 1f94c387d..26c1e272f 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,38 +1,51 @@ use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; +use crate::subnet_service::SyncCommitteeService; +use crate::{error, metrics}; use crate::{ - attestation_service::{AttServiceMessage, AttestationService}, + subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use crate::{error, metrics}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, + Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, +}; +use eth2_libp2p::{ + types::{GossipEncoding, GossipTopic}, + BehaviourEvent, MessageId, NetworkGlobals, PeerId, }; -use eth2_libp2p::{types::GossipKind, BehaviourEvent, MessageId, NetworkGlobals, PeerId}; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; +use futures::future::OptionFuture; use futures::prelude::*; -use slog::{debug, error, info, o, trace, warn}; -use std::{net::SocketAddr, sync::Arc, time::Duration}; +use slog::{crit, debug, error, info, o, trace, warn}; +use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; -use 
types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription};
+use types::{
+    EthSpec, ForkContext, ForkName, RelativeEpoch, SubnetId, SyncCommitteeSubscription,
+    SyncSubnetId, Unsigned, ValidatorSubscription,
+};
 
 mod tests;
 
 /// The interval (in seconds) that various network metrics will update.
 const METRIC_UPDATE_INTERVAL: u64 = 1;
+/// Delay after a fork where we unsubscribe from pre-fork topics.
+const UNSUBSCRIBE_DELAY_EPOCHS: u64 = 2;
 
 /// Types of messages that the network service can receive.
 #[derive(Debug)]
 pub enum NetworkMessage<T: EthSpec> {
     /// Subscribes a list of validators to specific slots for attestation duties.
-    Subscribe {
+    AttestationSubscribe {
         subscriptions: Vec<ValidatorSubscription>,
     },
+    /// Subscribes a list of validators to specific sync committee subnets.
+    SyncCommitteeSubscribe {
+        subscriptions: Vec<SyncCommitteeSubscription>,
+    },
     /// Subscribes the beacon node to the core gossipsub topics. We do this when we are either
     /// synced or close to the head slot.
     SubscribeCoreTopics,
@@ -97,6 +110,8 @@ pub struct NetworkService {
     libp2p: LibP2PService<T::EthSpec>,
     /// An attestation and subnet manager service.
     attestation_service: AttestationService<T>,
+    /// A sync committee subnet manager service.
+    sync_committee_service: SyncCommitteeService<T>,
     /// The receiver channel for lighthouse to communicate with the network service.
     network_recv: mpsc::UnboundedReceiver<NetworkMessage<T::EthSpec>>,
     /// The sending channel for the network service to send messages to be routed throughout
@@ -113,7 +128,9 @@ pub struct NetworkService {
     /// update the UDP socket of discovery if the UPnP mappings get established.
     discovery_auto_update: bool,
     /// A delay that expires when a new fork takes place.
-    next_fork_update: Option<Sleep>,
+    next_fork_update: Pin<Box<OptionFuture<Sleep>>>,
+    /// A delay that expires when we need to unsubscribe from old fork topics.
+    next_unsubscribe: Pin<Box<OptionFuture<Sleep>>>,
     /// Subscribe to all the subnets once synced.
     subscribe_all_subnets: bool,
     /// A timer for updating various network metrics.
@@ -121,6 +138,7 @@ pub struct NetworkService {
     /// gossipsub_parameter_update timer
     gossipsub_parameter_update: tokio::time::Interval,
     /// The logger for the network service.
+ fork_context: Arc, log: slog::Logger, } @@ -158,7 +176,19 @@ impl NetworkService { let enr_fork_id = beacon_chain.enr_fork_id(); // keep track of when our fork_id needs to be updated - let next_fork_update = next_fork_delay(&beacon_chain); + let next_fork_update = Box::pin(next_fork_delay(&beacon_chain).into()); + let next_unsubscribe = Box::pin(None.into()); + + let current_slot = beacon_chain + .slot() + .unwrap_or(beacon_chain.spec.genesis_slot); + + // Create a fork context for the given config and genesis validators root + let fork_context = Arc::new(ForkContext::new::( + current_slot, + beacon_chain.genesis_validators_root, + &beacon_chain.spec, + )); // launch libp2p service let (network_globals, mut libp2p) = LibP2PService::new( @@ -166,6 +196,7 @@ impl NetworkService { config, enr_fork_id, &network_log, + fork_context.clone(), &beacon_chain.spec, ) .await?; @@ -193,10 +224,14 @@ impl NetworkService { network_log.clone(), )?; - // attestation service + // attestation subnet service let attestation_service = AttestationService::new(beacon_chain.clone(), config, &network_log); + // sync committee subnet service + let sync_committee_service = + SyncCommitteeService::new(beacon_chain.clone(), config, &network_log); + // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -209,6 +244,7 @@ impl NetworkService { beacon_chain, libp2p, attestation_service, + sync_committee_service, network_recv, router_send, store, @@ -216,9 +252,11 @@ impl NetworkService { upnp_mappings: (None, None), discovery_auto_update: config.discv5_config.enr_update, next_fork_update, + next_unsubscribe, subscribe_all_subnets: config.subscribe_all_subnets, metrics_update, gossipsub_parameter_update, + fork_context, log: network_log, }; @@ -226,6 +264,26 @@ impl NetworkService { Ok((network_globals, network_send)) } + + /// Returns the required fork digests that gossipsub needs to subscribe to based on the current slot. + /// + /// For `current_slot < fork_slot`, this function returns both the pre-fork and post-fork + /// digests since we should be subscribed to post fork topics before the fork. 
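+    /// A sketch of the expected behaviour (hypothetical `service` value):
+    /// ```ignore
+    /// // Altair is scheduled but the current fork is still the base fork:
+    /// assert_eq!(service.required_gossip_fork_digests().len(), 2);
+    /// // Once the current fork is Altair, only the Altair digest remains:
+    /// assert_eq!(service.required_gossip_fork_digests().len(), 1);
+    /// ```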
+    pub fn required_gossip_fork_digests(&self) -> Vec<[u8; 4]> {
+        let fork_context = &self.fork_context;
+        match fork_context.current_fork() {
+            ForkName::Base => {
+                if fork_context.fork_exists(ForkName::Altair) {
+                    fork_context.all_fork_digests()
+                } else {
+                    vec![fork_context.genesis_context_bytes()]
+                }
+            }
+            ForkName::Altair => vec![fork_context
+                .to_context_bytes(ForkName::Altair)
+                .expect("Altair fork bytes should exist as it's initialized in ForkContext")],
+        }
+    }
 }
 
 fn spawn_service<T: BeaconChainTypes>(
@@ -363,42 +421,69 @@ fn spawn_service(
                        }
                        NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source),
                        NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source),
-                        NetworkMessage::Subscribe { subscriptions } => {
+                        NetworkMessage::AttestationSubscribe { subscriptions } => {
                            if let Err(e) = service
                                .attestation_service
                                .validator_subscriptions(subscriptions) {
-                                warn!(service.log, "Validator subscription failed"; "error" => e);
+                                warn!(service.log, "Attestation validator subscription failed"; "error" => e);
+                            }
+                        }
+                        NetworkMessage::SyncCommitteeSubscribe { subscriptions } => {
+                            if let Err(e) = service
+                                .sync_committee_service
+                                .validator_subscriptions(subscriptions) {
+                                warn!(service.log, "Sync committee validator subscription failed"; "error" => e);
                            }
                        }
                        NetworkMessage::SubscribeCoreTopics => {
-                            let mut subscribed_topics: Vec<GossipKind> = vec![];
-                            let already_subscribed = service.network_globals.gossipsub_subscriptions.read().clone();
-                            let already_subscribed = already_subscribed.iter().map(|x| x.kind()).collect::<HashSet<_>>();
-                            for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter().filter(|topic| already_subscribed.get(topic).is_none()) {
-                                if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
-                                    subscribed_topics.push(topic_kind.clone());
-                                } else {
-                                    warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind);
+                            let mut subscribed_topics: Vec<GossipTopic> = vec![];
+                            for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter() {
+                                for fork_digest in service.required_gossip_fork_digests() {
+                                    let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest);
+                                    if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) {
+                                        subscribed_topics.push(topic);
+                                    } else {
+                                        warn!(service.log, "Could not subscribe to topic"; "topic" => %topic);
+                                    }
                                }
                            }
 
-                            // if we are to subscribe to all subnets we do it here
+                            // If we are to subscribe to all subnets we do it here
                            if service.subscribe_all_subnets {
                                for subnet_id in 0..<<T as BeaconChainTypes>::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {
-                                    let subnet_id = SubnetId::new(subnet_id);
-                                    let topic_kind = eth2_libp2p::types::GossipKind::Attestation(subnet_id);
-                                    if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
-                                        // Update the ENR bitfield.
- service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); - subscribed_topics.push(topic_kind.clone()); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind); + let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); + // Update the ENR bitfield + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); + } + } } + for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { + let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); + // Update the ENR bitfield + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); + } + } } } if !subscribed_topics.is_empty() { - info!(service.log, "Subscribed to topics"; "topics" => ?subscribed_topics); + info!( + service.log, + "Subscribed to topics"; + "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + ); } } } @@ -406,19 +491,51 @@ fn spawn_service( // process any attestation service events Some(attestation_service_message) = service.attestation_service.next() => { match attestation_service_message { - AttServiceMessage::Subscribe(subnet_id) => { - service.libp2p.swarm.behaviour_mut().subscribe_to_subnet(subnet_id); + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().subscribe(topic); + } } - AttServiceMessage::Unsubscribe(subnet_id) => { - service.libp2p.swarm.behaviour_mut().unsubscribe_from_subnet(subnet_id); + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } } - AttServiceMessage::EnrAdd(subnet_id) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); + SubnetServiceMessage::EnrAdd(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); } - AttServiceMessage::EnrRemove(subnet_id) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, false); + SubnetServiceMessage::EnrRemove(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); } - AttServiceMessage::DiscoverPeers(subnets_to_discover) => { + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + } + } + } + // process any sync committee service events + Some(sync_committee_service_message) = service.sync_committee_service.next() => { + match sync_committee_service_message { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = 
GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); } } @@ -532,34 +649,57 @@ fn spawn_service( } } } - } + Some(_) = &mut service.next_fork_update => { + let new_enr_fork_id = service.beacon_chain.enr_fork_id(); - if let Some(delay) = &service.next_fork_update { - if delay.is_elapsed() { - service - .libp2p - .swarm - .behaviour_mut() - .update_fork_version(service.beacon_chain.enr_fork_id()); - service.next_fork_update = next_fork_delay(&service.beacon_chain); + let fork_context = &service.fork_context; + if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + info!( + service.log, + "Updating enr fork version"; + "old_fork" => ?fork_context.current_fork(), + "new_fork" => ?new_fork_name, + ); + fork_context.update_current_fork(*new_fork_name); + + service + .libp2p + .swarm + .behaviour_mut() + .update_fork_version(new_enr_fork_id.clone()); + // Reinitialize the next_fork_update + service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); + + // Set the next_unsubscribe delay. + let epoch_duration = service.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); + let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + service.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); + info!(service.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + } else { + crit!(service.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + } + + } + Some(_) = &mut service.next_unsubscribe => { + let new_enr_fork_id = service.beacon_chain.enr_fork_id(); + service.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + info!(service.log, "Unsubscribed from old fork topics"); + service.next_unsubscribe = Box::pin(None.into()); } } - metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); } }, "network"); } -/// Returns a `Sleep` that triggers shortly after the next change in the beacon chain fork version. +/// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. /// If there is no scheduled fork, `None` is returned. fn next_fork_delay( beacon_chain: &BeaconChain, ) -> Option { - beacon_chain.duration_to_next_fork().map(|until_fork| { - // Add a short time-out to start within the new fork period. 
- let delay = Duration::from_millis(200); - tokio::time::sleep_until(tokio::time::Instant::now() + until_fork + delay) - }) + beacon_chain + .duration_to_next_fork() + .map(|(_, until_fork)| tokio::time::sleep(until_fork)) } impl Drop for NetworkService { diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 41cc990ed..d4eeba57d 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,4 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::ChainSpec; use eth2_libp2p::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -13,10 +12,7 @@ pub trait ToStatusMessage { impl ToStatusMessage for BeaconChain { fn status_message(&self) -> Result { let head_info = self.head_info()?; - let genesis_validators_root = self.genesis_validators_root; - - let fork_digest = - ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); + let fork_digest = self.enr_fork_id().fork_digest; Ok(StatusMessage { fork_digest, diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs similarity index 91% rename from beacon_node/network/src/attestation_service/mod.rs rename to beacon_node/network/src/subnet_service/attestation_subnets.rs index 09c0ff895..dcfd35897 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -2,6 +2,7 @@ //! given time. It schedules subscriptions to shard subnets, requests peer discoveries and //! determines whether attestations should be aggregated and/or passed to the beacon node. +use super::SubnetServiceMessage; use std::collections::{HashMap, HashSet, VecDeque}; use std::pin::Pin; use std::sync::Arc; @@ -13,16 +14,13 @@ use rand::seq::SliceRandom; use slog::{debug, error, o, trace, warn}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::{NetworkConfig, SubnetDiscovery}; +use eth2_libp2p::{NetworkConfig, Subnet, SubnetDiscovery}; use hashset_delay::HashSetDelay; use slot_clock::SlotClock; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; use crate::metrics; -#[cfg(test)] -mod tests; - /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. /// Subnet discovery query takes atmost 30 secs, 2 slots take 24s. @@ -30,7 +28,6 @@ const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random /// gossip topics that we subscribed to due to the validator connection. const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; -// 30 mins at a 12s slot time /// The fraction of a slot that we subscribe to a subnet before the required slot. /// /// Note: The time is calculated as `time = seconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. @@ -39,46 +36,6 @@ const ADVANCE_SUBSCRIBE_TIME: u32 = 3; /// 36s at 12s slot time const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; -#[derive(Debug, Clone)] -pub enum AttServiceMessage { - /// Subscribe to the specified subnet id. - Subscribe(SubnetId), - /// Unsubscribe to the specified subnet id. - Unsubscribe(SubnetId), - /// Add the `SubnetId` to the ENR bitfield. - EnrAdd(SubnetId), - /// Remove the `SubnetId` from the ENR bitfield. 
- EnrRemove(SubnetId), - /// Discover peers for a list of `SubnetDiscovery`. - DiscoverPeers(Vec), -} - -/// Note: This `PartialEq` impl is for use only in tests. -/// The `DiscoverPeers` comparison is good enough for testing only. -#[cfg(test)] -impl PartialEq for AttServiceMessage { - fn eq(&self, other: &AttServiceMessage) -> bool { - match (self, other) { - (AttServiceMessage::Subscribe(a), AttServiceMessage::Subscribe(b)) => a == b, - (AttServiceMessage::Unsubscribe(a), AttServiceMessage::Unsubscribe(b)) => a == b, - (AttServiceMessage::EnrAdd(a), AttServiceMessage::EnrAdd(b)) => a == b, - (AttServiceMessage::EnrRemove(a), AttServiceMessage::EnrRemove(b)) => a == b, - (AttServiceMessage::DiscoverPeers(a), AttServiceMessage::DiscoverPeers(b)) => { - if a.len() != b.len() { - return false; - } - for i in 0..a.len() { - if a[i].subnet_id != b[i].subnet_id || a[i].min_ttl != b[i].min_ttl { - return false; - } - } - true - } - _ => false, - } - } -} - /// A particular subnet at a given slot. #[derive(PartialEq, Eq, Hash, Clone, Debug)] pub struct ExactSubnet { @@ -90,13 +47,13 @@ pub struct ExactSubnet { pub struct AttestationService { /// Queued events to return to the driving service. - events: VecDeque, + events: VecDeque, /// A reference to the beacon chain to process received attestations. - beacon_chain: Arc>, + pub(crate) beacon_chain: Arc>, /// The collection of currently subscribed random subnets mapped to their expiry deadline. - random_subnets: HashSetDelay, + pub(crate) random_subnets: HashSetDelay, /// The collection of all currently subscribed subnets (long-lived **and** short-lived). subscriptions: HashSet, @@ -332,7 +289,7 @@ impl AttestationService { .duration_to_slot(exact_subnet.slot + 1) .map(|duration| std::time::Instant::now() + duration); Some(SubnetDiscovery { - subnet_id: exact_subnet.subnet_id, + subnet: Subnet::Attestation(exact_subnet.subnet_id), min_ttl, }) } else { @@ -349,7 +306,7 @@ impl AttestationService { if !discovery_subnets.is_empty() { self.events - .push_back(AttServiceMessage::DiscoverPeers(discovery_subnets)); + .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); } Ok(()) } @@ -474,8 +431,8 @@ impl AttestationService { // However, subscribing to random subnets ideally shouldn't happen very often (once in ~27 hours) and // this makes it easier to deterministically test the attestations service. 
        self.events
-            .push_back(AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
-                subnet_id,
+            .push_back(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
+                subnet: Subnet::Attestation(subnet_id),
                min_ttl: None,
            }]));
@@ -484,11 +441,14 @@ impl AttestationService {
            self.subscriptions.insert(subnet_id);
            debug!(self.log, "Subscribing to random subnet"; "subnet_id" => ?subnet_id);
            self.events
-                .push_back(AttServiceMessage::Subscribe(subnet_id));
+                .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation(
+                    subnet_id,
+                )));
        }
 
        // add the subnet to the ENR bitfield
-        self.events.push_back(AttServiceMessage::EnrAdd(subnet_id));
+        self.events
+            .push_back(SubnetServiceMessage::EnrAdd(Subnet::Attestation(subnet_id)));
    }
 }
@@ -525,7 +485,9 @@ impl AttestationService {
            debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot.as_u64());
            self.subscriptions.insert(exact_subnet.subnet_id);
            self.events
-                .push_back(AttServiceMessage::Subscribe(exact_subnet.subnet_id));
+                .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation(
+                    exact_subnet.subnet_id,
+                )));
        }
    }
 }
@@ -544,7 +506,9 @@ impl AttestationService {
        self.subscriptions.remove(&exact_subnet.subnet_id);
 
        self.events
-            .push_back(AttServiceMessage::Unsubscribe(exact_subnet.subnet_id));
+            .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                exact_subnet.subnet_id,
+            )));
    }
 
    /// A random subnet has expired.
@@ -567,12 +531,16 @@ impl AttestationService {
            // we are not at capacity, unsubscribe from the current subnet.
            debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id);
            self.events
-                .push_back(AttServiceMessage::Unsubscribe(subnet_id));
+                .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                    subnet_id,
+                )));
        }
 
        // Remove the ENR bitfield bit and choose a new random one from the available subnets
        self.events
-            .push_back(AttServiceMessage::EnrRemove(subnet_id));
+            .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation(
+                subnet_id,
+            )));
        // Subscribe to a new random subnet
        self.subscribe_to_random_subnets(1);
    }
@@ -606,19 +574,23 @@ impl AttestationService {
                .any(|s| s.subnet_id == *subnet_id)
            {
                self.events
-                    .push_back(AttServiceMessage::Unsubscribe(*subnet_id));
+                    .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                        *subnet_id,
+                    )));
            }
            // as the long lasting subnet subscription is being removed, remove the subnet_id from
            // the ENR bitfield
            self.events
-                .push_back(AttServiceMessage::EnrRemove(*subnet_id));
+                .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation(
+                    *subnet_id,
+                )));
            self.random_subnets.remove(subnet_id);
        }
    }
 }
 
 impl<T: BeaconChainTypes> Stream for AttestationService<T> {
-    type Item = AttServiceMessage;
+    type Item = SubnetServiceMessage;
 
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // update the waker if needed
diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs
new file mode 100644
index 000000000..4df540d9b
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/mod.rs
@@ -0,0 +1,50 @@
+pub mod attestation_subnets;
+pub mod sync_subnets;
+
+use eth2_libp2p::{Subnet, SubnetDiscovery};
+
+pub use attestation_subnets::AttestationService;
+pub use sync_subnets::SyncCommitteeService;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug, Clone)]
+pub enum SubnetServiceMessage {
+    /// Subscribe to the specified subnet id.
+    Subscribe(Subnet),
+    /// Unsubscribe from the specified subnet id.
+    Unsubscribe(Subnet),
+    /// Add the `SubnetId` to the ENR bitfield.
+    EnrAdd(Subnet),
+    /// Remove the `SubnetId` from the ENR bitfield.
+    EnrRemove(Subnet),
+    /// Discover peers for a list of `SubnetDiscovery`.
+    DiscoverPeers(Vec<SubnetDiscovery>),
+}
+
+/// Note: This `PartialEq` impl is for use only in tests.
+/// The `DiscoverPeers` comparison is good enough for testing only.
+#[cfg(test)]
+impl PartialEq for SubnetServiceMessage {
+    fn eq(&self, other: &SubnetServiceMessage) -> bool {
+        match (self, other) {
+            (SubnetServiceMessage::Subscribe(a), SubnetServiceMessage::Subscribe(b)) => a == b,
+            (SubnetServiceMessage::Unsubscribe(a), SubnetServiceMessage::Unsubscribe(b)) => a == b,
+            (SubnetServiceMessage::EnrAdd(a), SubnetServiceMessage::EnrAdd(b)) => a == b,
+            (SubnetServiceMessage::EnrRemove(a), SubnetServiceMessage::EnrRemove(b)) => a == b,
+            (SubnetServiceMessage::DiscoverPeers(a), SubnetServiceMessage::DiscoverPeers(b)) => {
+                if a.len() != b.len() {
+                    return false;
+                }
+                for i in 0..a.len() {
+                    if a[i].subnet != b[i].subnet || a[i].min_ttl != b[i].min_ttl {
+                        return false;
+                    }
+                }
+                true
+            }
+            _ => false,
+        }
+    }
+}
diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs
new file mode 100644
index 000000000..4162fdd16
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/sync_subnets.rs
@@ -0,0 +1,350 @@
+//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any
+//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries.
+
+use std::collections::{hash_map::Entry, HashMap, VecDeque};
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use futures::prelude::*;
+use slog::{debug, error, o, trace, warn};
+
+use super::SubnetServiceMessage;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2_libp2p::{NetworkConfig, Subnet, SubnetDiscovery};
+use hashset_delay::HashSetDelay;
+use slot_clock::SlotClock;
+use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId};
+
+use crate::metrics;
+
+/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the
+/// slot is less than this number, skip the peer discovery process.
+/// Subnet discovery query takes at most 30 secs, 2 slots take 24s.
+const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2;
+
+/// A particular subnet, and the epoch until which we should stay subscribed to it.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+pub struct ExactSubnet {
+    /// The `SyncSubnetId` associated with this subnet.
+    pub subnet_id: SyncSubnetId,
+    /// The epoch until which we need to stay subscribed to the subnet.
+    pub until_epoch: Epoch,
+}
+pub struct SyncCommitteeService<T: BeaconChainTypes> {
+    /// Queued events to return to the driving service.
+    events: VecDeque<SubnetServiceMessage>,
+
+    /// A reference to the beacon chain to process received sync committee messages.
+    pub(crate) beacon_chain: Arc<BeaconChain<T>>,
+
+    /// The collection of all currently subscribed subnets.
+    subscriptions: HashMap<SyncSubnetId, Epoch>,
+
+    /// A collection of timeouts for when to unsubscribe from a subnet.
+    unsubscriptions: HashSetDelay<SyncSubnetId>,
+
+    /// The waker for the current thread.
+    waker: Option<std::task::Waker>,
+
+    /// Whether the discovery mechanism of lighthouse is disabled.
+    discovery_disabled: bool,
+
+    /// Whether we are always subscribed to all subnets.
+    subscribe_all_subnets: bool,
+
+    /// The logger for the sync committee service.
+    log: slog::Logger,
+}
+
+impl<T: BeaconChainTypes> SyncCommitteeService<T> {
+    /* Public functions */
+
+    pub fn new(
+        beacon_chain: Arc<BeaconChain<T>>,
+        config: &NetworkConfig,
+        log: &slog::Logger,
+    ) -> Self {
+        let log = log.new(o!("service" => "sync_committee_service"));
+
+        let spec = &beacon_chain.spec;
+        let epoch_duration_secs =
+            beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch();
+        let default_timeout =
+            epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64());
+
+        SyncCommitteeService {
+            events: VecDeque::with_capacity(10),
+            beacon_chain,
+            subscriptions: HashMap::new(),
+            unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)),
+            waker: None,
+            subscribe_all_subnets: config.subscribe_all_subnets,
+            discovery_disabled: config.disable_discovery,
+            log,
+        }
+    }
+
+    /// Returns the count of all currently subscribed subnets.
+    #[cfg(test)]
+    pub fn subscription_count(&self) -> usize {
+        use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
+        if self.subscribe_all_subnets {
+            SYNC_COMMITTEE_SUBNET_COUNT as usize
+        } else {
+            self.subscriptions.len()
+        }
+    }
+
+    /// Processes a list of sync committee subscriptions.
+    ///
+    /// This will:
+    /// - Search for peers for required subnets.
+    /// - Request subscriptions to required subnets.
+    /// - Build the timeouts for each of these events.
+    ///
+    /// This returns a result simply for the ergonomics of using `?`. The result can be
+    /// safely dropped.
+    pub fn validator_subscriptions(
+        &mut self,
+        subscriptions: Vec<SyncCommitteeSubscription>,
+    ) -> Result<(), String> {
+        let mut subnets_to_discover = Vec::new();
+        for subscription in subscriptions {
+            metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS);
+            // NOTE: We assume all subscriptions have been verified before reaching this service.
+
+            // Registers the validator with the subnet service.
+            trace!(self.log,
+                "Sync committee subscription";
+                "subscription" => ?subscription,
+            );
+
+            let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::<T::EthSpec>(
+                &subscription.sync_committee_indices,
+            ) {
+                Ok(subnet_ids) => subnet_ids,
+                Err(e) => {
+                    warn!(self.log,
+                        "Failed to compute subnet id for sync committee subscription";
+                        "error" => ?e,
+                        "validator_index" => subscription.validator_index
+                    );
+                    continue;
+                }
+            };
+
+            for subnet_id in subnet_ids {
+                let exact_subnet = ExactSubnet {
+                    subnet_id,
+                    until_epoch: subscription.until_epoch,
+                };
+                subnets_to_discover.push(exact_subnet.clone());
+                if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) {
+                    warn!(self.log,
+                        "Subscription to sync subnet error";
+                        "error" => e,
+                        "validator_index" => subscription.validator_index,
+                    );
+                } else {
+                    trace!(self.log,
+                        "Subscribed to subnet for sync committee duties";
+                        "exact_subnet" => ?exact_subnet,
+                        "validator_index" => subscription.validator_index
+                    );
+                }
+            }
+        }
+        // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the
+        // required subnets.
+        if !self.discovery_disabled {
+            if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) {
+                warn!(self.log, "Discovery lookup request error"; "error" => e);
+            };
+        }
+
+        // pre-emptively wake the thread to check for new events
+        if let Some(waker) = &self.waker {
+            waker.wake_by_ref();
+        }
+        Ok(())
+    }
+
+    /* Internal private functions */
+
+    /// Checks if there are currently queued discovery requests and the time required to make the
+    /// request.
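+    /// The `min_ttl` handed to discovery (a sketch mirroring the body below) is the
+    /// wall-clock instant one slot past the subscription's end slot:
+    /// ```ignore
+    /// let min_ttl = beacon_chain
+    ///     .slot_clock
+    ///     .duration_to_slot(until_slot + 1)
+    ///     .map(|duration| std::time::Instant::now() + duration);
+    /// ```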
+ /// + /// If there is sufficient time, queues a peer discovery request for all the required subnets. + fn discover_peers_request<'a>( + &mut self, + exact_subnets: impl Iterator, + ) -> Result<(), &'static str> { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + + let discovery_subnets: Vec = exact_subnets + .filter_map(|exact_subnet| { + let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); + // check if there is enough time to perform a discovery lookup + if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { + // if the slot is more than epoch away, add an event to start looking for peers + // add one slot to ensure we keep the peer for the subscription slot + let min_ttl = self + .beacon_chain + .slot_clock + .duration_to_slot(until_slot + 1) + .map(|duration| std::time::Instant::now() + duration); + Some(SubnetDiscovery { + subnet: Subnet::SyncCommittee(exact_subnet.subnet_id), + min_ttl, + }) + } else { + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + warn!(self.log, + "Not enough time for a discovery search"; + "subnet_id" => ?exact_subnet + ); + None + } + }) + .collect(); + + if !discovery_subnets.is_empty() { + self.events + .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + } + Ok(()) + } + + /// Adds a subscription event and an associated unsubscription event if required. + fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { + // Return if we have subscribed to all subnets + if self.subscribe_all_subnets { + return Ok(()); + } + + // Return if we already have a subscription for exact_subnet + if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) { + return Ok(()); + } + + // Return if we already have subscription set to expire later than the current request. + if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) { + if *until_epoch >= exact_subnet.until_epoch { + return Ok(()); + } + } + + // initialise timing variables + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or("Could not get the current slot")?; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); + // Calculate the duration to the unsubscription event. + let expected_end_subscription_duration = if current_slot >= until_slot { + warn!( + self.log, + "Sync committee subscription is past expiration"; + "current_slot" => current_slot, + "exact_subnet" => ?exact_subnet, + ); + return Ok(()); + } else { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + + // the duration until we no longer need this subscription. We assume a single slot is + // sufficient. + self.beacon_chain + .slot_clock + .duration_to_slot(until_slot) + .ok_or("Unable to determine duration to unsubscription slot")? 
+                + slot_duration
+        };
+
+        if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) {
+            // We are not currently subscribed and have no waiting subscription, create one.
+            debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "until_epoch" => ?exact_subnet.until_epoch);
+            e.insert(exact_subnet.until_epoch);
+            self.events
+                .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee(
+                    exact_subnet.subnet_id,
+                )));
+
+            // Add the subnet to the ENR bitfield.
+            self.events
+                .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee(
+                    exact_subnet.subnet_id,
+                )));
+
+            // Add an unsubscription event to remove ourselves from the subnet once completed.
+            self.unsubscriptions
+                .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration);
+        } else {
+            // We are already subscribed, extend the unsubscription duration.
+            self.unsubscriptions
+                .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration);
+        }
+
+        Ok(())
+    }
+
+    /// A queued unsubscription is ready.
+    fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) {
+        debug!(self.log, "Unsubscribing from subnet"; "subnet" => *subnet_id);
+
+        self.subscriptions.remove(&subnet_id);
+        self.events
+            .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee(
+                subnet_id,
+            )));
+
+        self.events
+            .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee(
+                subnet_id,
+            )));
+    }
+}
+
+impl<T: BeaconChainTypes> Stream for SyncCommitteeService<T> {
+    type Item = SubnetServiceMessage;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        // Update the waker if the stored one would not wake the current task.
+        if let Some(waker) = &self.waker {
+            if !waker.will_wake(cx.waker()) {
+                self.waker = Some(cx.waker().clone());
+            }
+        } else {
+            self.waker = Some(cx.waker().clone());
+        }
+
+        // Process any unsubscription events.
+        match self.unsubscriptions.poll_next_unpin(cx) {
+            Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet),
+            Poll::Ready(Some(Err(e))) => {
+                error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e);
+            }
+            Poll::Ready(None) | Poll::Pending => {}
+        }
+
+        // Process any generated events.
+        if let Some(event) = self.events.pop_front() {
+            return Poll::Ready(Some(event));
+        }
+
+        Poll::Pending
+    }
+}
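
The `Stream` implementation above stores the task's `Waker` so that `validator_subscriptions` calls landing between polls can wake the network task. A stripped-down sketch of this stored-waker pattern (hypothetical names, independent of the patch):

```rust
use futures::Stream;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll, Waker};

/// Minimal queue-backed stream: producers push events and wake the consumer.
struct EventQueue<T> {
    events: VecDeque<T>,
    waker: Option<Waker>,
}

impl<T> EventQueue<T> {
    fn push(&mut self, event: T) {
        self.events.push_back(event);
        // Wake the task that last polled us so it sees the new event.
        if let Some(waker) = &self.waker {
            waker.wake_by_ref();
        }
    }
}

impl<T: Unpin> Stream for EventQueue<T> {
    type Item = T;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Store (or refresh) the waker so `push` can notify us later.
        if !self.waker.as_ref().map_or(false, |w| w.will_wake(cx.waker())) {
            self.waker = Some(cx.waker().clone());
        }
        match self.events.pop_front() {
            Some(event) => Poll::Ready(Some(event)),
            None => Poll::Pending,
        }
    }
}
```
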
diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs
new file mode 100644
index 000000000..6ad083764
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/tests/mod.rs
@@ -0,0 +1,573 @@
+use super::*;
+use beacon_chain::{
+    builder::{BeaconChainBuilder, Witness},
+    eth1_chain::CachingEth1Backend,
+    BeaconChain,
+};
+use eth2_libp2p::NetworkConfig;
+use futures::prelude::*;
+use genesis::{generate_deterministic_keypairs, interop_genesis_state};
+use lazy_static::lazy_static;
+use slog::Logger;
+use sloggers::{null::NullLoggerBuilder, Build};
+use slot_clock::{SlotClock, SystemTimeSlotClock};
+use std::sync::Arc;
+use std::time::{Duration, SystemTime};
+use store::config::StoreConfig;
+use store::{HotColdDB, MemoryStore};
+use types::{
+    CommitteeIndex, Epoch, EthSpec, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription,
+    SyncSubnetId, ValidatorSubscription,
+};
+
+const SLOT_DURATION_MILLIS: u64 = 400;
+
+type TestBeaconChainType = Witness<
+    SystemTimeSlotClock,
+    CachingEth1Backend<MainnetEthSpec>,
+    MainnetEthSpec,
+    MemoryStore<MainnetEthSpec>,
+    MemoryStore<MainnetEthSpec>,
+>;
+
+pub struct TestBeaconChain {
+    chain: Arc<BeaconChain<TestBeaconChainType>>,
+}
+
+impl TestBeaconChain {
+    pub fn new_with_system_clock() -> Self {
+        let spec = MainnetEthSpec::default_spec();
+
+        let keypairs = generate_deterministic_keypairs(1);
+
+        let log = get_logger();
+        let store =
+            HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap();
+
+        let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
+
+        let chain = Arc::new(
+            BeaconChainBuilder::new(MainnetEthSpec)
+                .logger(log.clone())
+                .custom_spec(spec.clone())
+                .store(Arc::new(store))
+                .genesis_state(
+                    interop_genesis_state::<MainnetEthSpec>(&keypairs, 0, &spec)
+                        .expect("should generate interop state"),
+                )
+                .expect("should build state using recent genesis")
+                .dummy_eth1_backend()
+                .expect("should build dummy backend")
+                .slot_clock(SystemTimeSlotClock::new(
+                    Slot::new(0),
+                    Duration::from_secs(recent_genesis_time()),
+                    Duration::from_millis(SLOT_DURATION_MILLIS),
+                ))
+                .shutdown_sender(shutdown_tx)
+                .monitor_validators(true, vec![], log)
+                .build()
+                .expect("should build"),
+        );
+        Self { chain }
+    }
+}
+
+pub fn recent_genesis_time() -> u64 {
+    SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .unwrap()
+        .as_secs()
+}
+
+fn get_logger() -> Logger {
+    NullLoggerBuilder.build().expect("logger should build")
+}
+
+lazy_static! {
+    static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock();
+}
+
+fn get_attestation_service() -> AttestationService<TestBeaconChainType> {
+    let log = get_logger();
+    let config = NetworkConfig::default();
+
+    let beacon_chain = CHAIN.chain.clone();
+
+    AttestationService::new(beacon_chain, &config, &log)
+}
+
+fn get_sync_committee_service() -> SyncCommitteeService<TestBeaconChainType> {
+    let log = get_logger();
+    let config = NetworkConfig::default();
+
+    let beacon_chain = CHAIN.chain.clone();
+
+    SyncCommitteeService::new(beacon_chain, &config, &log)
+}
+
+// Gets a number of events from the subscription service, or returns the events collected so far
+// if it times out after a number of slots.
+async fn get_events<S: Stream<Item = SubnetServiceMessage> + Unpin>(
+    stream: &mut S,
+    num_events: Option<usize>,
+    num_slots_before_timeout: u32,
+) -> Vec<SubnetServiceMessage> {
+    let mut events = Vec::new();
+
+    let collect_stream_fut = async {
+        loop {
+            if let Some(result) = stream.next().await {
+                events.push(result);
+                if let Some(num) = num_events {
+                    if events.len() == num {
+                        return;
+                    }
+                }
+            }
+        }
+    };
+
+    tokio::select! {
+        _ = collect_stream_fut => events,
+        _ = tokio::time::sleep(
+            Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
+        ) => events,
+    }
+}
+
+mod attestation_service {
+    use super::*;
+
+    fn get_subscription(
+        validator_index: u64,
+        attestation_committee_index: CommitteeIndex,
+        slot: Slot,
+        committee_count_at_slot: u64,
+    ) -> ValidatorSubscription {
+        let is_aggregator = true;
+        ValidatorSubscription {
+            validator_index,
+            attestation_committee_index,
+            slot,
+            committee_count_at_slot,
+            is_aggregator,
+        }
+    }
+
+    fn get_subscriptions(
+        validator_count: u64,
+        slot: Slot,
+        committee_count_at_slot: u64,
+    ) -> Vec<ValidatorSubscription> {
+        (0..validator_count)
+            .map(|validator_index| {
+                get_subscription(
+                    validator_index,
+                    validator_index,
+                    slot,
+                    committee_count_at_slot,
+                )
+            })
+            .collect()
+    }
+
+    #[tokio::test]
+    async fn subscribe_current_slot_wait_for_unsubscribe() {
+        // subscription config
+        let validator_index = 1;
+        let committee_index = 1;
+        // Keep a low subscription slot so that there are no additional subnet discovery events.
+        let subscription_slot = 0;
+        let committee_count = 1;
+
+        // create the attestation service and subscriptions
+        let mut attestation_service = get_attestation_service();
+        let current_slot = attestation_service
+            .beacon_chain
+            .slot_clock
+            .now()
+            .expect("Could not get current slot");
+
+        let subscriptions = vec![get_subscription(
+            validator_index,
+            committee_index,
+            current_slot + Slot::new(subscription_slot),
+            committee_count,
+        )];
+
+        // submit the subscriptions
+        attestation_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        // not enough time for peer discovery, just subscribe, unsubscribe
+        let subnet_id = SubnetId::compute_subnet::<MainnetEthSpec>(
+            current_slot + Slot::new(subscription_slot),
+            committee_index,
+            committee_count,
+            &attestation_service.beacon_chain.spec,
+        )
+        .unwrap();
+        let expected = vec![
+            SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)),
+            SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)),
+        ];
+
+        // Wait for up to 3 epochs to collect the events, including the unsubscription event.
+        let events = get_events(
+            &mut attestation_service,
+            Some(5),
+            (MainnetEthSpec::slots_per_epoch() * 3) as u32,
+        )
+        .await;
+        matches::assert_matches!(
+            events[..3],
+            [
+                SubnetServiceMessage::DiscoverPeers(_),
+                SubnetServiceMessage::Subscribe(_any1),
+                SubnetServiceMessage::EnrAdd(_any3)
+            ]
+        );
+
+        // If the long lived and short lived subnets are the same, there should be no more events
+        // as we don't resubscribe to already subscribed subnets.
+        if !attestation_service.random_subnets.contains(&subnet_id) {
+            assert_eq!(expected[..], events[3..]);
+        }
+        // Should be subscribed to only 1 long lived subnet after unsubscription.
+        assert_eq!(attestation_service.subscription_count(), 1);
+    }
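
The `subnet_id` the test expects comes from the phase 0 attestation subnet mapping (`compute_subnet_for_attestation` in the consensus specs), which `SubnetId::compute_subnet` implements. A standalone sketch of that mapping, with mainnet constants assumed:

```rust
// Phase 0 mapping from (slot, committee) to an attestation subnet, per
// `compute_subnet_for_attestation` in the validator spec. Mainnet constants assumed.
const SLOTS_PER_EPOCH: u64 = 32;
const ATTESTATION_SUBNET_COUNT: u64 = 64;

fn compute_subnet_for_attestation(
    committees_per_slot: u64,
    slot: u64,
    committee_index: u64,
) -> u64 {
    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // One committee per slot: slot 3, committee 0 lands on subnet 3.
    assert_eq!(compute_subnet_for_attestation(1, 3, 0), 3);
}
```
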
+
+    /// Test to verify that we are not unsubscribing from a subnet before a required subscription.
+    #[tokio::test]
+    async fn test_same_subnet_unsubscription() {
+        // subscription config
+        let validator_index = 1;
+        let committee_count = 1;
+
+        // Makes 2 validator subscriptions to the same subnet but at different slots.
+        // There should be just 1 unsubscription event, for the later slot subscription
+        // (subscription_slot2).
+        let subscription_slot1 = 0;
+        let subscription_slot2 = 1;
+        let com1 = 1;
+        let com2 = 0;
+
+        // create the attestation service and subscriptions
+        let mut attestation_service = get_attestation_service();
+        let current_slot = attestation_service
+            .beacon_chain
+            .slot_clock
+            .now()
+            .expect("Could not get current slot");
+
+        let sub1 = get_subscription(
+            validator_index,
+            com1,
+            current_slot + Slot::new(subscription_slot1),
+            committee_count,
+        );
+
+        let sub2 = get_subscription(
+            validator_index,
+            com2,
+            current_slot + Slot::new(subscription_slot2),
+            committee_count,
+        );
+
+        let subnet_id1 = SubnetId::compute_subnet::<MainnetEthSpec>(
+            current_slot + Slot::new(subscription_slot1),
+            com1,
+            committee_count,
+            &attestation_service.beacon_chain.spec,
+        )
+        .unwrap();
+
+        let subnet_id2 = SubnetId::compute_subnet::<MainnetEthSpec>(
+            current_slot + Slot::new(subscription_slot2),
+            com2,
+            committee_count,
+            &attestation_service.beacon_chain.spec,
+        )
+        .unwrap();
+
+        // Assert that the subscriptions are different but their subnet is the same.
+        assert_ne!(sub1, sub2);
+        assert_eq!(subnet_id1, subnet_id2);
+
+        // submit the subscriptions
+        attestation_service
+            .validator_subscriptions(vec![sub1, sub2])
+            .unwrap();
+
+        // The unsubscription event should happen at slot 2: since the subnet ids are the same,
+        // the unsubscription event is scheduled for the higher slot + 1.
+        // Get all events for 1 slot duration (the unsubscription event should happen after 2
+        // slot durations).
+        let events = get_events(&mut attestation_service, None, 1).await;
+        matches::assert_matches!(
+            events[..3],
+            [
+                SubnetServiceMessage::DiscoverPeers(_),
+                SubnetServiceMessage::Subscribe(_any1),
+                SubnetServiceMessage::EnrAdd(_any3)
+            ]
+        );
+
+        let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1));
+
+        // Should still be subscribed to 1 long lived and 1 short lived subnet if both are
+        // different.
+        if !attestation_service.random_subnets.contains(&subnet_id1) {
+            assert_eq!(expected, events[3]);
+            assert_eq!(attestation_service.subscription_count(), 2);
+        } else {
+            assert_eq!(attestation_service.subscription_count(), 1);
+        }
+
+        // Get events for 1 more slot duration, we should get the unsubscribe event now.
+        let unsubscribe_event = get_events(&mut attestation_service, None, 1).await;
+
+        // If the long lived and short lived subnets are different, we should get an
+        // unsubscription event.
+        if !attestation_service.random_subnets.contains(&subnet_id1) {
+            assert_eq!(
+                [SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                    subnet_id1
+                ))],
+                unsubscribe_event[..]
+            );
+        }
+
+        // Should be subscribed to only 1 long lived subnet after unsubscription.
+        assert_eq!(attestation_service.subscription_count(), 1);
+    }
+
+    #[tokio::test]
+    async fn subscribe_all_random_subnets() {
+        let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
+        let subscription_slot = 10;
+        let subscription_count = attestation_subnet_count;
+        let committee_count = 1;
+
+        // create the attestation service and subscriptions
+        let mut attestation_service = get_attestation_service();
+        let current_slot = attestation_service
+            .beacon_chain
+            .slot_clock
+            .now()
+            .expect("Could not get current slot");
+
+        let subscriptions = get_subscriptions(
+            subscription_count,
+            current_slot + subscription_slot,
+            committee_count,
+        );
+
+        // submit the subscriptions
+        attestation_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        let events = get_events(&mut attestation_service, None, 3).await;
+        let mut discover_peer_count = 0;
+        let mut enr_add_count = 0;
+        let mut unexpected_msg_count = 0;
+
+        for event in &events {
+            match event {
+                SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1,
+                SubnetServiceMessage::Subscribe(_any_subnet) => {}
+                SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1,
+                _ => unexpected_msg_count += 1,
+            }
+        }
+
+        // The bulk discovery request length should be equal to the validator count (here equal
+        // to `attestation_subnet_count`).
+        let bulk_discovery_event = events.last().unwrap();
+        if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event {
+            assert_eq!(d.len(), attestation_subnet_count as usize);
+        } else {
+            panic!("Unexpected event {:?}", bulk_discovery_event);
+        }
+
+        // 64 `DiscoverPeers` requests of length 1 corresponding to random subnets
+        // and 1 `DiscoverPeers` request corresponding to bulk subnet discovery.
+        assert_eq!(discover_peer_count, subscription_count + 1);
+        assert_eq!(attestation_service.subscription_count(), 64);
+        assert_eq!(enr_add_count, 64);
+        assert_eq!(unexpected_msg_count, 0);
+    }
+
+    #[tokio::test]
+    async fn subscribe_all_random_subnets_plus_one() {
+        let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
+        let subscription_slot = 10;
+        // the 65th subscription should result in no more messages than the previous scenario
+        let subscription_count = attestation_subnet_count + 1;
+        let committee_count = 1;
+
+        // create the attestation service and subscriptions
+        let mut attestation_service = get_attestation_service();
+        let current_slot = attestation_service
+            .beacon_chain
+            .slot_clock
+            .now()
+            .expect("Could not get current slot");
+
+        let subscriptions = get_subscriptions(
+            subscription_count,
+            current_slot + subscription_slot,
+            committee_count,
+        );
+
+        // submit the subscriptions
+        attestation_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        let events = get_events(&mut attestation_service, None, 3).await;
+        let mut discover_peer_count = 0;
+        let mut enr_add_count = 0;
+        let mut unexpected_msg_count = 0;
+
+        for event in &events {
+            match event {
+                SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1,
+                SubnetServiceMessage::Subscribe(_any_subnet) => {}
+                SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1,
+                _ => unexpected_msg_count += 1,
+            }
+        }
+
+        // The bulk discovery request length shouldn't exceed the maximum
+        // `attestation_subnet_count`.
+        let bulk_discovery_event = events.last().unwrap();
+        if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event {
+            assert_eq!(d.len(), attestation_subnet_count as usize);
+        } else {
+            panic!("Unexpected event {:?}", bulk_discovery_event);
+        }
+        // 64 `DiscoverPeers` requests of length 1 corresponding to random subnets
+        // and 1 `DiscoverPeers` request corresponding to the bulk subnet discovery.
+        // For the 65th subscription, the call to `subscribe_to_random_subnets` is not made
+        // because we are at capacity.
+        assert_eq!(discover_peer_count, 64 + 1);
+        assert_eq!(attestation_service.subscription_count(), 64);
+        assert_eq!(enr_add_count, 64);
+        assert_eq!(unexpected_msg_count, 0);
+    }
+}
+
+mod sync_committee_service {
+    use super::*;
+
+    #[tokio::test]
+    async fn subscribe_and_unsubscribe() {
+        // subscription config
+        let validator_index = 1;
+        let until_epoch = Epoch::new(1);
+        let sync_committee_indices = vec![1];
+
+        // create the sync committee service and subscriptions
+        let mut sync_committee_service = get_sync_committee_service();
+
+        let subscriptions = vec![SyncCommitteeSubscription {
+            validator_index,
+            sync_committee_indices: sync_committee_indices.clone(),
+            until_epoch,
+        }];
+
+        // submit the subscriptions
+        sync_committee_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        let subnet_ids = SyncSubnetId::compute_subnets_for_sync_committee::<MainnetEthSpec>(
+            &sync_committee_indices,
+        )
+        .unwrap();
+        let subnet_id = subnet_ids.iter().next().unwrap();
+
+        // Note: the unsubscription event fires at the end of epoch 1, i.e. after 2 epochs
+        // (2 * 32 slots * 0.4s = 25.6s).
+        let events = get_events(
+            &mut sync_committee_service,
+            Some(5),
+            (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events
+        )
+        .await;
+        assert_eq!(
+            events[..2],
+            [
+                SubnetServiceMessage::Subscribe(Subnet::SyncCommittee(*subnet_id)),
+                SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee(*subnet_id))
+            ]
+        );
+        matches::assert_matches!(
+            events[2..],
+            [
+                SubnetServiceMessage::DiscoverPeers(_),
+                SubnetServiceMessage::Unsubscribe(_),
+                SubnetServiceMessage::EnrRemove(_),
+            ]
+        );
+
+        // Should be unsubscribed at the end.
+        assert_eq!(sync_committee_service.subscription_count(), 0);
+    }
+
+    #[tokio::test]
+    async fn same_subscription_with_lower_until_epoch() {
+        // subscription config
+        let validator_index = 1;
+        let until_epoch = Epoch::new(2);
+        let sync_committee_indices = vec![1];
+
+        // create the sync committee service and subscriptions
+        let mut sync_committee_service = get_sync_committee_service();
+
+        let subscriptions = vec![SyncCommitteeSubscription {
+            validator_index,
+            sync_committee_indices: sync_committee_indices.clone(),
+            until_epoch,
+        }];
+
+        // submit the subscriptions
+        sync_committee_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        // Get all immediate events (won't include unsubscriptions)
+        let events = get_events(&mut sync_committee_service, None, 1).await;
+        matches::assert_matches!(
+            events[..],
+            [
+                SubnetServiceMessage::Subscribe(Subnet::SyncCommittee(_)),
+                SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee(_)),
+                SubnetServiceMessage::DiscoverPeers(_),
+            ]
+        );
+
+        // Additional subscriptions which shouldn't emit any non-discovery events.
+        // Subscription 1 is a duplicate of an existing subscription.
+        // Subscription 2 is the same subscription with a lower `until_epoch` than the existing
+        // subscription.
+        let subscriptions = vec![
+            SyncCommitteeSubscription {
+                validator_index,
+                sync_committee_indices: sync_committee_indices.clone(),
+                until_epoch,
+            },
+            SyncCommitteeSubscription {
+                validator_index,
+                sync_committee_indices: sync_committee_indices.clone(),
+                until_epoch: until_epoch - 1,
+            },
+        ];
+
+        // submit the subscriptions
+        sync_committee_service
+            .validator_subscriptions(subscriptions)
+            .unwrap();
+
+        // Get all immediate events (won't include unsubscriptions)
+        let events = get_events(&mut sync_committee_service, None, 1).await;
+        matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]);
+
+        // Should still be subscribed to the original subnet at the end, since requests with a
+        // lower `until_epoch` don't replace the existing subscription.
+        assert_eq!(sync_committee_service.subscription_count(), 1);
+    }
+}
diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs
index f5869a7e8..00a1ab560 100644
--- a/boot_node/src/config.rs
+++ b/boot_node/src/config.rs
@@ -89,7 +89,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
         let genesis_state = eth2_network_config.beacon_state::<T>()?;
         slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string());
-        let enr_fork = spec.enr_fork_id(
+        let enr_fork = spec.enr_fork_id::<T>(
             types::Slot::from(0u64),
             genesis_state.genesis_validators_root(),
         );
@@ -111,7 +111,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
         // If we know of the ENR field, add it to the initial construction
         if let Some(enr_fork_bytes) = enr_fork {
-            builder.add_value("eth2", &enr_fork_bytes);
+            builder.add_value("eth2", enr_fork_bytes.as_slice());
         }
         builder
             .build(&local_key)
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 264f6c587..6b43e7857 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -445,6 +445,7 @@ pub struct MetaData {
     #[serde(with = "serde_utils::quoted_u64")]
     pub seq_number: u64,
     pub attnets: String,
+    pub syncnets: String,
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs
index 67caabd57..0de0683f2 100644
--- a/consensus/types/src/beacon_block.rs
+++ b/consensus/types/src/beacon_block.rs
@@ -65,101 +65,6 @@ impl<T: EthSpec> BeaconBlock<T> {
         }
     }
 
-    /// Return a block where the block has maximum size.
-    pub fn full(spec: &ChainSpec) -> BeaconBlock<T> {
-        let header = BeaconBlockHeader {
-            slot: Slot::new(1),
-            proposer_index: 0,
-            parent_root: Hash256::zero(),
-            state_root: Hash256::zero(),
-            body_root: Hash256::zero(),
-        };
-
-        let signed_header = SignedBeaconBlockHeader {
-            message: header,
-            signature: Signature::empty(),
-        };
-        let indexed_attestation: IndexedAttestation<T> = IndexedAttestation {
-            attesting_indices: VariableList::new(vec![
-                0_u64;
-                T::MaxValidatorsPerCommittee::to_usize()
-            ])
-            .unwrap(),
-            data: AttestationData::default(),
-            signature: AggregateSignature::empty(),
-        };
-
-        let deposit_data = DepositData {
-            pubkey: PublicKeyBytes::empty(),
-            withdrawal_credentials: Hash256::zero(),
-            amount: 0,
-            signature: SignatureBytes::empty(),
-        };
-        let proposer_slashing = ProposerSlashing {
-            signed_header_1: signed_header.clone(),
-            signed_header_2: signed_header,
-        };
-
-        let attester_slashing = AttesterSlashing {
-            attestation_1: indexed_attestation.clone(),
-            attestation_2: indexed_attestation,
-        };
-
-        let attestation: Attestation<T> = Attestation {
-            aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize())
-                .unwrap(),
-            data: AttestationData::default(),
-            signature: AggregateSignature::empty(),
-        };
-
-        let deposit = Deposit {
-            proof: FixedVector::from_elem(Hash256::zero()),
-            data: deposit_data,
-        };
-
-        let voluntary_exit = VoluntaryExit {
-            epoch: Epoch::new(1),
-            validator_index: 1,
-        };
-
-        let signed_voluntary_exit = SignedVoluntaryExit {
-            message: voluntary_exit,
-            signature: Signature::empty(),
-        };
-
-        // FIXME(altair): use an Altair block (they're bigger)
-        let mut block = BeaconBlockBase::<T>::empty(spec);
-        for _ in 0..T::MaxProposerSlashings::to_usize() {
-            block
-                .body
-                .proposer_slashings
-                .push(proposer_slashing.clone())
-                .unwrap();
-        }
-        for _ in 0..T::MaxDeposits::to_usize() {
-            block.body.deposits.push(deposit.clone()).unwrap();
-        }
-        for _ in 0..T::MaxVoluntaryExits::to_usize() {
-            block
-                .body
-                .voluntary_exits
-                .push(signed_voluntary_exit.clone())
-                .unwrap();
-        }
-        for _ in 0..T::MaxAttesterSlashings::to_usize() {
-            block
-                .body
-                .attester_slashings
-                .push(attester_slashing.clone())
-                .unwrap();
-        }
-
-        for _ in 0..T::MaxAttestations::to_usize() {
-            block.body.attestations.push(attestation.clone()).unwrap();
-        }
-        BeaconBlock::Base(block)
-    }
-
     /// Custom SSZ decoder that takes a `ChainSpec` as context.
     pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
         let slot_len = <Slot as Decode>::ssz_fixed_len();
@@ -314,10 +219,104 @@ impl<T: EthSpec> BeaconBlockBase<T> {
             },
         }
     }
+
+    /// Return a block where the block has maximum size.
+    pub fn full(spec: &ChainSpec) -> Self {
+        let header = BeaconBlockHeader {
+            slot: Slot::new(1),
+            proposer_index: 0,
+            parent_root: Hash256::zero(),
+            state_root: Hash256::zero(),
+            body_root: Hash256::zero(),
+        };
+
+        let signed_header = SignedBeaconBlockHeader {
+            message: header,
+            signature: Signature::empty(),
+        };
+        let indexed_attestation: IndexedAttestation<T> = IndexedAttestation {
+            attesting_indices: VariableList::new(vec![
+                0_u64;
+                T::MaxValidatorsPerCommittee::to_usize()
+            ])
+            .unwrap(),
+            data: AttestationData::default(),
+            signature: AggregateSignature::empty(),
+        };
+
+        let deposit_data = DepositData {
+            pubkey: PublicKeyBytes::empty(),
+            withdrawal_credentials: Hash256::zero(),
+            amount: 0,
+            signature: SignatureBytes::empty(),
+        };
+        let proposer_slashing = ProposerSlashing {
+            signed_header_1: signed_header.clone(),
+            signed_header_2: signed_header,
+        };
+
+        let attester_slashing = AttesterSlashing {
+            attestation_1: indexed_attestation.clone(),
+            attestation_2: indexed_attestation,
+        };
+
+        let attestation: Attestation<T> = Attestation {
+            aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize())
+                .unwrap(),
+            data: AttestationData::default(),
+            signature: AggregateSignature::empty(),
+        };
+
+        let deposit = Deposit {
+            proof: FixedVector::from_elem(Hash256::zero()),
+            data: deposit_data,
+        };
+
+        let voluntary_exit = VoluntaryExit {
+            epoch: Epoch::new(1),
+            validator_index: 1,
+        };
+
+        let signed_voluntary_exit = SignedVoluntaryExit {
+            message: voluntary_exit,
+            signature: Signature::empty(),
+        };
+
+        let mut block = BeaconBlockBase::<T>::empty(spec);
+        for _ in 0..T::MaxProposerSlashings::to_usize() {
+            block
+                .body
+                .proposer_slashings
+                .push(proposer_slashing.clone())
+                .unwrap();
+        }
+        for _ in 0..T::MaxDeposits::to_usize() {
+            block.body.deposits.push(deposit.clone()).unwrap();
+        }
+        for _ in 0..T::MaxVoluntaryExits::to_usize() {
+            block
+                .body
+                .voluntary_exits
+                .push(signed_voluntary_exit.clone())
+                .unwrap();
+        }
+        for _ in 0..T::MaxAttesterSlashings::to_usize() {
+            block
+                .body
+                .attester_slashings
+                .push(attester_slashing.clone())
+                .unwrap();
+        }
+
+        for _ in 0..T::MaxAttestations::to_usize() {
+            block.body.attestations.push(attestation.clone()).unwrap();
+        }
+        block
+    }
 }
 
 impl<T: EthSpec> BeaconBlockAltair<T> {
-    /// Returns an empty block to be used during genesis.
+    /// Returns an empty Altair block to be used during genesis.
     pub fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockAltair {
             slot: spec.genesis_slot,
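
`full` exists so other components can size worst-case payloads (e.g. the gossip and RPC limits elsewhere in this PR). A hypothetical usage sketch, assuming the `types` and `ssz` crates already used throughout this patch:

```rust
// Hypothetical sketch: measure the worst-case SSZ length of a full base block,
// e.g. when deriving maximum message size limits.
use ssz::Encode;
use types::{BeaconBlockBase, ChainSpec, MainnetEthSpec};

fn max_base_block_len(spec: &ChainSpec) -> usize {
    let block = BeaconBlockBase::<MainnetEthSpec>::full(spec);
    block.as_ssz_bytes().len()
}
```
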
@@ -341,6 +340,36 @@ impl<T: EthSpec> BeaconBlockAltair<T> {
             },
         }
     }
+
+    /// Return an Altair block where the block has maximum size.
+    pub fn full(spec: &ChainSpec) -> Self {
+        let base_block = BeaconBlockBase::full(spec);
+        let sync_aggregate = SyncAggregate {
+            sync_committee_signature: AggregateSignature::empty(),
+            sync_committee_bits: BitVector::default(),
+        };
+        BeaconBlockAltair {
+            slot: spec.genesis_slot,
+            proposer_index: 0,
+            parent_root: Hash256::zero(),
+            state_root: Hash256::zero(),
+            body: BeaconBlockBodyAltair {
+                proposer_slashings: base_block.body.proposer_slashings,
+                attester_slashings: base_block.body.attester_slashings,
+                attestations: base_block.body.attestations,
+                deposits: base_block.body.deposits,
+                voluntary_exits: base_block.body.voluntary_exits,
+                sync_aggregate,
+                randao_reveal: Signature::empty(),
+                eth1_data: Eth1Data {
+                    deposit_root: Hash256::zero(),
+                    block_hash: Hash256::zero(),
+                    deposit_count: 0,
+                },
+                graffiti: Graffiti::default(),
+            },
+        }
+    }
 }
 
 #[cfg(test)]
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 7fbb4ea5f..043007dec 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -148,26 +148,49 @@ impl ChainSpec {
     }
 
     /// Returns an `EnrForkId` for the given `slot`.
-    ///
-    /// Presently, we don't have any forks so we just ignore the slot. In the future this function
-    /// may return something different based upon the slot.
-    pub fn enr_fork_id(&self, _slot: Slot, genesis_validators_root: Hash256) -> EnrForkId {
+    pub fn enr_fork_id<T: EthSpec>(
+        &self,
+        slot: Slot,
+        genesis_validators_root: Hash256,
+    ) -> EnrForkId {
         EnrForkId {
-            fork_digest: Self::compute_fork_digest(
-                self.genesis_fork_version,
-                genesis_validators_root,
-            ),
-            next_fork_version: self.genesis_fork_version,
-            next_fork_epoch: self.far_future_epoch,
+            fork_digest: self.fork_digest::<T>(slot, genesis_validators_root),
+            next_fork_version: self.next_fork_version(),
+            next_fork_epoch: self
+                .next_fork_epoch::<T>(slot)
+                .map(|(_, e)| e)
+                .unwrap_or(self.far_future_epoch),
         }
     }
 
-    /// Returns the epoch of the next scheduled change in the `fork.current_version`.
+    /// Returns the `ForkDigest` for the given slot.
     ///
-    /// There are no future forks scheduled so this function always returns `None`. This may not
-    /// always be the case in the future, though.
-    pub fn next_fork_epoch(&self) -> Option<Epoch> {
-        None
+    /// If `self.altair_fork_epoch` is `None`, this function returns the genesis fork digest;
+    /// otherwise it returns the fork digest based on the slot.
+    pub fn fork_digest<T: EthSpec>(&self, slot: Slot, genesis_validators_root: Hash256) -> [u8; 4] {
+        let fork_name = self.fork_name_at_slot::<T>(slot);
+        Self::compute_fork_digest(
+            self.fork_version_for_name(fork_name),
+            genesis_validators_root,
+        )
+    }
+
+    /// Returns the `next_fork_version`.
+    ///
+    /// Since `next_fork_version = current_fork_version` when no future fork is planned,
+    /// this function returns `altair_fork_version` until another fork is scheduled.
+    pub fn next_fork_version(&self) -> [u8; 4] {
+        self.altair_fork_version
+    }
+
+    /// Returns the epoch of the next scheduled fork along with its corresponding `ForkName`.
+    ///
+    /// If no future forks are scheduled, this function returns `None`.
+    pub fn next_fork_epoch<T: EthSpec>(&self, slot: Slot) -> Option<(ForkName, Epoch)> {
+        let current_fork_name = self.fork_name_at_slot::<T>(slot);
+        let next_fork_name = current_fork_name.next_fork()?;
+        let fork_epoch = self.fork_epoch(next_fork_name)?;
+        Some((next_fork_name, fork_epoch))
     }
 
     /// Returns the name of the fork which is active at `slot`.
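
Taken together, these helpers drive the ENR `eth2` field: the digest reflects the fork active at `slot`, while the next-fork fields advertise the scheduled transition. A sketch of how a caller might read them, using the functions added above:

```rust
use types::{ChainSpec, Hash256, MainnetEthSpec, Slot};

// Sketch: derive the ENR `eth2` field contents for a given slot using the new helpers.
fn describe_fork_schedule(spec: &ChainSpec, slot: Slot, genesis_validators_root: Hash256) {
    let digest = spec.fork_digest::<MainnetEthSpec>(slot, genesis_validators_root);
    let next_fork_epoch = spec
        .next_fork_epoch::<MainnetEthSpec>(slot)
        .map(|(_, epoch)| epoch)
        .unwrap_or(spec.far_future_epoch);
    println!("fork_digest: {:?}, next_fork_epoch: {}", digest, next_fork_epoch);
}
```
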
diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs
index 3f59d2b90..6e21edf9f 100644
--- a/consensus/types/src/eth_spec.rs
+++ b/consensus/types/src/eth_spec.rs
@@ -78,6 +78,8 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq
      * New in Altair
      */
     type SyncCommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    /// The number of `sync_committee` subnets.
+    type SyncCommitteeSubnetCount: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     /*
      * Derived values (set these CAREFULLY)
      */
@@ -218,6 +220,7 @@ impl EthSpec for MainnetEthSpec {
     type MaxDeposits = U16;
     type MaxVoluntaryExits = U16;
     type SyncCommitteeSize = U512;
+    type SyncCommitteeSubnetCount = U4;
     type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
     type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
     type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
@@ -250,6 +253,7 @@ impl EthSpec for MinimalEthSpec {
     params_from_eth_spec!(MainnetEthSpec {
         JustificationBitsLength,
         SubnetBitfieldLength,
+        SyncCommitteeSubnetCount,
         MaxValidatorsPerCommittee,
         GenesisEpoch,
         HistoricalRootsLimit,
diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs
new file mode 100644
index 000000000..6da188570
--- /dev/null
+++ b/consensus/types/src/fork_context.rs
@@ -0,0 +1,91 @@
+use parking_lot::RwLock;
+
+use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot};
+use std::collections::HashMap;
+
+/// Provides fork-specific info like the current fork name and the fork digests corresponding to
+/// every valid fork.
+#[derive(Debug)]
+pub struct ForkContext {
+    current_fork: RwLock<ForkName>,
+    fork_to_digest: HashMap<ForkName, [u8; 4]>,
+    digest_to_fork: HashMap<[u8; 4], ForkName>,
+}
+
+impl ForkContext {
+    /// Creates a new `ForkContext` object by enumerating all enabled forks and computing their
+    /// fork digest.
+    ///
+    /// A fork is disabled in the `ChainSpec` if the activation epoch corresponding to that fork
+    /// is `None`.
+    pub fn new<T: EthSpec>(
+        current_slot: Slot,
+        genesis_validators_root: Hash256,
+        spec: &ChainSpec,
+    ) -> Self {
+        let mut fork_to_digest = vec![(
+            ForkName::Base,
+            ChainSpec::compute_fork_digest(spec.genesis_fork_version, genesis_validators_root),
+        )];
+
+        // Only add Altair to the list of forks if it's enabled (i.e. spec.altair_fork_epoch != None).
+        if spec.altair_fork_epoch.is_some() {
+            fork_to_digest.push((
+                ForkName::Altair,
+                ChainSpec::compute_fork_digest(spec.altair_fork_version, genesis_validators_root),
+            ))
+        }
+
+        let fork_to_digest: HashMap<ForkName, [u8; 4]> = fork_to_digest.into_iter().collect();
+
+        let digest_to_fork = fork_to_digest
+            .clone()
+            .into_iter()
+            .map(|(k, v)| (v, k))
+            .collect();
+
+        Self {
+            current_fork: RwLock::new(spec.fork_name_at_slot::<T>(current_slot)),
+            fork_to_digest,
+            digest_to_fork,
+        }
+    }
+
+    /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object.
+    pub fn fork_exists(&self, fork_name: ForkName) -> bool {
+        self.fork_to_digest.contains_key(&fork_name)
+    }
+
+    /// Returns the `current_fork`.
+    pub fn current_fork(&self) -> ForkName {
+        *self.current_fork.read()
+    }
+
+    /// Updates the `current_fork` field to a new fork.
+    pub fn update_current_fork(&self, new_fork: ForkName) {
+        *self.current_fork.write() = new_fork;
+    }
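
Since the RPC codec changes in this PR rely on mapping context bytes back to a fork, here is a small usage sketch of `ForkContext` (assuming an Altair-enabled `ChainSpec`):

```rust
use types::{ChainSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Slot};

// Sketch: build a ForkContext at genesis and round-trip a fork digest,
// as the RPC codec does when decoding context bytes.
fn demo(spec: &ChainSpec) {
    let fork_context = ForkContext::new::<MainnetEthSpec>(Slot::new(0), Hash256::zero(), spec);
    if let Some(digest) = fork_context.to_context_bytes(ForkName::Altair) {
        // The digest resolves back to the Altair fork.
        assert_eq!(fork_context.from_context_bytes(digest), Some(&ForkName::Altair));
    }
}
```
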
+
+    /// Returns the context bytes/fork_digest corresponding to the genesis fork version.
+    pub fn genesis_context_bytes(&self) -> [u8; 4] {
+        *self
+            .fork_to_digest
+            .get(&ForkName::Base)
+            .expect("ForkContext must contain genesis context bytes")
+    }
+
+    /// Returns the fork type given the context bytes/fork_digest.
+    /// Returns `None` if the context bytes don't correspond to any valid `ForkName`.
+    pub fn from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> {
+        self.digest_to_fork.get(&context)
+    }
+
+    /// Returns the context bytes/fork_digest corresponding to a fork name.
+    /// Returns `None` if the `ForkName` has not been initialized.
+    pub fn to_context_bytes(&self, fork_name: ForkName) -> Option<[u8; 4]> {
+        self.fork_to_digest.get(&fork_name).cloned()
+    }
+
+    /// Returns all `fork_digest`s that are currently in the `ForkContext` object.
+    pub fn all_fork_digests(&self) -> Vec<[u8; 4]> {
+        self.digest_to_fork.keys().cloned().collect()
+    }
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 7df65cb26..b8876ccf8 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -55,12 +55,14 @@ pub mod signed_beacon_block_header;
 pub mod signed_contribution_and_proof;
 pub mod signed_voluntary_exit;
 pub mod signing_data;
+pub mod sync_committee_subscription;
 pub mod validator;
 pub mod validator_subscription;
 pub mod voluntary_exit;
 #[macro_use]
 pub mod slot_epoch_macros;
 pub mod config_and_preset;
+pub mod fork_context;
 pub mod participation_flags;
 pub mod participation_list;
 pub mod preset;
@@ -107,6 +109,7 @@ pub use crate::enr_fork_id::EnrForkId;
 pub use crate::eth1_data::Eth1Data;
 pub use crate::eth_spec::EthSpecId;
 pub use crate::fork::Fork;
+pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
 pub use crate::free_attestation::FreeAttestation;
@@ -136,6 +139,7 @@ pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData;
 pub use crate::sync_committee::SyncCommittee;
 pub use crate::sync_committee_contribution::SyncCommitteeContribution;
 pub use crate::sync_committee_message::SyncCommitteeMessage;
+pub use crate::sync_committee_subscription::SyncCommitteeSubscription;
 pub use crate::sync_selection_proof::SyncSelectionProof;
 pub use crate::sync_subnet_id::SyncSubnetId;
 pub use crate::validator::Validator;
diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs
new file mode 100644
index 000000000..287322587
--- /dev/null
+++ b/consensus/types/src/sync_committee_subscription.rs
@@ -0,0 +1,15 @@
+use crate::Epoch;
+use serde::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+
+/// A sync committee subscription created when a validator subscribes to sync committee subnets
+/// to perform sync committee duties.
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
+pub struct SyncCommitteeSubscription {
+    /// The validator's index.
+    pub validator_index: u64,
+    /// The sync committee indices.
+    pub sync_committee_indices: Vec<u64>,
+    /// The epoch until which this subscription is required.
+    pub until_epoch: Epoch,
+}
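
Putting the new type to use: a sketch that builds a `SyncCommitteeSubscription` and derives its subnets via `compute_subnets_for_sync_committee` (added in the next hunk); mainnet preset assumed (subcommittee size 128):

```rust
use types::{Epoch, MainnetEthSpec, SyncCommitteeSubscription, SyncSubnetId};

// Sketch: derive the subnets implied by a subscription's committee indices.
fn demo() {
    let subscription = SyncCommitteeSubscription {
        validator_index: 1,
        sync_committee_indices: vec![1, 200],
        until_epoch: Epoch::new(256),
    };
    // Index 1 falls in subcommittee 0 and index 200 in subcommittee 1 (128 members each).
    let subnets = SyncSubnetId::compute_subnets_for_sync_committee::<MainnetEthSpec>(
        &subscription.sync_committee_indices,
    )
    .unwrap();
    assert_eq!(subnets.len(), 2);
}
```
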
diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs
index fba0b2993..b15e49ed9 100644
--- a/consensus/types/src/sync_subnet_id.rs
+++ b/consensus/types/src/sync_subnet_id.rs
@@ -1,6 +1,11 @@
 //! Identifies each sync committee subnet by an integer identifier.
 use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
+use crate::EthSpec;
+use safe_arith::{ArithError, SafeArith};
 use serde_derive::{Deserialize, Serialize};
+use ssz_types::typenum::Unsigned;
+use std::collections::HashSet;
+use std::fmt::{self, Display};
 use std::ops::{Deref, DerefMut};
 
 lazy_static! {
@@ -33,6 +38,24 @@ impl SyncSubnetId {
     pub fn new(id: u64) -> Self {
         id.into()
     }
+
+    /// Compute the required subnets to subscribe to, given the sync committee indices.
+    pub fn compute_subnets_for_sync_committee<T: EthSpec>(
+        sync_committee_indices: &[u64],
+    ) -> Result<HashSet<SyncSubnetId>, ArithError> {
+        let subcommittee_size = T::SyncSubcommitteeSize::to_u64();
+
+        sync_committee_indices
+            .iter()
+            .map(|index| index.safe_div(subcommittee_size).map(Self::new))
+            .collect()
+    }
+}
+
+impl Display for SyncSubnetId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "{}", self.0)
+    }
 }
 
 impl Deref for SyncSubnetId {