Merge branch 'unstable' into deneb-merge-from-unstable-20230627

# Conflicts:
#	beacon_node/beacon_chain/src/beacon_chain.rs
#	beacon_node/beacon_chain/src/block_verification.rs
#	beacon_node/beacon_chain/src/lib.rs
#	beacon_node/beacon_chain/src/test_utils.rs
#	beacon_node/beacon_chain/tests/block_verification.rs
#	beacon_node/beacon_chain/tests/store_tests.rs
#	beacon_node/beacon_chain/tests/tests.rs
#	beacon_node/http_api/src/publish_blocks.rs
#	beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
#	beacon_node/lighthouse_network/src/rpc/methods.rs
#	beacon_node/lighthouse_network/src/rpc/outbound.rs
#	beacon_node/lighthouse_network/src/rpc/protocol.rs
#	beacon_node/lighthouse_network/src/service/api_types.rs
#	beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
#	beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
#	beacon_node/network/src/beacon_processor/worker/sync_methods.rs
#	beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
#	beacon_node/network/src/sync/network_context.rs
#	beacon_node/network/src/sync/range_sync/batch.rs
#	beacon_node/network/src/sync/range_sync/chain.rs
#	common/eth2/src/types.rs
#	consensus/fork_choice/src/fork_choice.rs

commit 97c4660761
.github/workflows/release.yml (10 changed lines)

@@ -134,17 +134,11 @@ jobs:
       - name: Build Lighthouse for Windows portable
         if: matrix.arch == 'x86_64-windows-portable'
-        # NOTE: profile set to release until this rustc issue is fixed:
-        #
-        # https://github.com/rust-lang/rust/issues/107781
-        #
-        # tracked at: https://github.com/sigp/lighthouse/issues/3964
-        run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release
+        run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}

       - name: Build Lighthouse for Windows modern
         if: matrix.arch == 'x86_64-windows'
-        # NOTE: profile set to release (see above)
-        run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release
+        run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}

       - name: Configure GPG and create artifacts
         if: startsWith(matrix.arch, 'x86_64-windows') != true
Cargo.lock (generated, 310 changed lines)

@@ -566,6 +566,12 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce"
 
+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
 [[package]]
 name = "base64"
 version = "0.13.1"
@@ -1416,6 +1422,18 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "crypto-bigint"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15"
+dependencies = [
+ "generic-array",
+ "rand_core 0.6.4",
+ "subtle",
+ "zeroize",
+]
+
 [[package]]
 name = "crypto-common"
 version = "0.1.6"
@@ -1511,11 +1529,12 @@ dependencies = [
 
 [[package]]
 name = "curve25519-dalek"
-version = "4.0.0-rc.1"
+version = "4.0.0-rc.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16"
+checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585"
 dependencies = [
  "cfg-if",
  "digest 0.10.7",
  "fiat-crypto",
  "packed_simd_2",
  "platforms 3.0.2",
@@ -1698,6 +1717,16 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "der"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17"
+dependencies = [
+ "const-oid",
+ "zeroize",
+]
+
 [[package]]
 name = "der-parser"
 version = "7.0.0"
@@ -1844,6 +1873,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
 dependencies = [
  "block-buffer 0.10.4",
+ "const-oid",
  "crypto-common",
  "subtle",
 ]
@@ -1900,15 +1930,15 @@ dependencies = [
 
 [[package]]
 name = "discv5"
-version = "0.2.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b009a99b85b58900df46435307fc5c4c845af7e182582b1fbf869572fa9fce69"
+checksum = "77f32d27968ba86689e3f0eccba0383414348a6fc5918b0a639c98dd81e20ed6"
 dependencies = [
  "aes 0.7.5",
  "aes-gcm 0.9.4",
  "arrayvec",
  "delay_map",
- "enr 0.7.0",
+ "enr 0.8.1",
  "fnv",
  "futures",
  "hashlink 0.7.0",
@@ -1924,8 +1954,6 @@ dependencies = [
  "smallvec",
  "socket2 0.4.9",
  "tokio",
- "tokio-stream",
- "tokio-util 0.6.10",
  "tracing",
  "tracing-subscriber",
  "uint",
@@ -1961,10 +1989,24 @@ version = "0.14.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
 dependencies = [
- "der",
- "elliptic-curve",
- "rfc6979",
- "signature",
+ "der 0.6.1",
+ "elliptic-curve 0.12.3",
+ "rfc6979 0.3.1",
+ "signature 1.6.4",
 ]
 
+[[package]]
+name = "ecdsa"
+version = "0.16.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428"
+dependencies = [
+ "der 0.7.6",
+ "digest 0.10.7",
+ "elliptic-curve 0.13.5",
+ "rfc6979 0.4.0",
+ "signature 2.1.0",
+ "spki 0.7.2",
+]
+
 [[package]]
@@ -1973,7 +2015,17 @@ version = "1.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7"
 dependencies = [
- "signature",
+ "signature 1.6.4",
 ]
 
+[[package]]
+name = "ed25519"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963"
+dependencies = [
+ "pkcs8 0.10.2",
+ "signature 2.1.0",
+]
+
 [[package]]
@@ -1983,13 +2035,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
 dependencies = [
  "curve25519-dalek 3.2.0",
- "ed25519",
+ "ed25519 1.5.3",
  "rand 0.7.3",
  "serde",
  "sha2 0.9.9",
  "zeroize",
 ]
 
+[[package]]
+name = "ed25519-dalek"
+version = "2.0.0-rc.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a"
+dependencies = [
+ "curve25519-dalek 4.0.0-rc.2",
+ "ed25519 2.2.1",
+ "rand_core 0.6.4",
+ "serde",
+ "sha2 0.10.6",
+ "zeroize",
+]
+
 [[package]]
 name = "ef_tests"
 version = "0.2.0"
@@ -2037,18 +2103,37 @@ version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
 dependencies = [
- "base16ct",
- "crypto-bigint",
- "der",
+ "base16ct 0.1.1",
+ "crypto-bigint 0.4.9",
+ "der 0.6.1",
  "digest 0.10.7",
- "ff",
+ "ff 0.12.1",
  "generic-array",
- "group",
+ "group 0.12.1",
  "hkdf",
  "pem-rfc7468",
- "pkcs8",
+ "pkcs8 0.9.0",
  "rand_core 0.6.4",
- "sec1",
+ "sec1 0.3.0",
  "subtle",
  "zeroize",
 ]
 
+[[package]]
+name = "elliptic-curve"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b"
+dependencies = [
+ "base16ct 0.2.0",
+ "crypto-bigint 0.5.2",
+ "digest 0.10.7",
+ "ff 0.13.0",
+ "generic-array",
+ "group 0.13.0",
+ "pkcs8 0.10.2",
+ "rand_core 0.6.4",
+ "sec1 0.7.2",
+ "subtle",
+ "zeroize",
+]
@@ -2072,7 +2157,7 @@ dependencies = [
  "bs58",
  "bytes",
  "hex",
- "k256",
+ "k256 0.11.6",
  "log",
  "rand 0.8.5",
  "rlp",
@@ -2083,16 +2168,15 @@ dependencies = [
 
 [[package]]
 name = "enr"
-version = "0.7.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad"
+checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116"
 dependencies = [
  "base64 0.13.1",
- "bs58",
  "bytes",
- "ed25519-dalek",
+ "ed25519-dalek 2.0.0-rc.2",
  "hex",
- "k256",
+ "k256 0.13.1",
  "log",
  "rand 0.8.5",
  "rlp",
@@ -2258,7 +2342,7 @@ dependencies = [
  "futures-util",
  "libsecp256k1",
  "lighthouse_network",
- "mime",
+ "mediatype",
  "procinfo",
  "proto_array",
  "psutil",
@@ -2598,11 +2682,11 @@ dependencies = [
  "bytes",
  "cargo_metadata",
  "chrono",
- "elliptic-curve",
+ "elliptic-curve 0.12.3",
  "ethabi 18.0.0",
  "generic-array",
  "hex",
- "k256",
+ "k256 0.11.6",
  "once_cell",
  "open-fastrlp",
  "rand 0.8.5",
@@ -2785,6 +2869,16 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "ff"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449"
+dependencies = [
+ "rand_core 0.6.4",
+ "subtle",
+]
+
 [[package]]
 name = "ffi-opaque"
 version = "2.0.1"
@@ -3063,6 +3157,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
 dependencies = [
  "typenum",
  "version_check",
+ "zeroize",
 ]
 
 [[package]]
@@ -3172,7 +3267,18 @@ version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
 dependencies = [
- "ff",
+ "ff 0.12.1",
  "rand_core 0.6.4",
  "subtle",
 ]
 
+[[package]]
+name = "group"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
+dependencies = [
+ "ff 0.13.0",
+ "rand_core 0.6.4",
+ "subtle",
+]
@@ -3909,12 +4015,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
 dependencies = [
  "cfg-if",
- "ecdsa",
- "elliptic-curve",
+ "ecdsa 0.14.8",
+ "elliptic-curve 0.12.3",
  "sha2 0.10.6",
  "sha3 0.10.8",
 ]
 
+[[package]]
+name = "k256"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc"
+dependencies = [
+ "cfg-if",
+ "ecdsa 0.16.7",
+ "elliptic-curve 0.13.5",
+ "once_cell",
+ "sha2 0.10.6",
+ "signature 2.1.0",
+]
+
 [[package]]
 name = "keccak"
 version = "0.1.4"
@@ -4131,7 +4251,7 @@ checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38"
 dependencies = [
  "asn1_der",
  "bs58",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
  "either",
  "fnv",
  "futures",
@@ -4166,7 +4286,7 @@ checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f"
 dependencies = [
  "asn1_der",
  "bs58",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
  "either",
  "fnv",
  "futures",
@@ -4185,7 +4305,7 @@ dependencies = [
  "prost-build",
  "rand 0.8.5",
  "rw-stream-sink",
- "sec1",
+ "sec1 0.3.0",
  "sha2 0.10.6",
  "smallvec",
  "thiserror",
@@ -4294,7 +4414,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1"
 dependencies = [
  "bs58",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
  "log",
  "multiaddr 0.17.1",
  "multihash 0.17.0",
@@ -4914,6 +5034,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "mediatype"
+version = "0.19.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea6e62614ab2fc0faa58bb15102a0382d368f896a9fa4776592589ab55c4de7"
+
 [[package]]
 name = "memchr"
 version = "2.5.0"
@@ -5638,9 +5764,9 @@ dependencies = [
 
 [[package]]
 name = "openssl"
-version = "0.10.52"
+version = "0.10.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56"
+checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
 dependencies = [
  "bitflags",
  "cfg-if",
@@ -5679,9 +5805,9 @@ dependencies = [
 
 [[package]]
 name = "openssl-sys"
-version = "0.9.87"
+version = "0.9.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e"
+checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
 dependencies = [
  "cc",
  "libc",
@@ -5726,8 +5852,8 @@ version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594"
 dependencies = [
- "ecdsa",
- "elliptic-curve",
+ "ecdsa 0.14.8",
+ "elliptic-curve 0.12.3",
  "sha2 0.10.6",
 ]
 
@@ -5737,8 +5863,8 @@ version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa"
 dependencies = [
- "ecdsa",
- "elliptic-curve",
+ "ecdsa 0.14.8",
+ "elliptic-curve 0.12.3",
  "sha2 0.10.6",
 ]
 
@@ -5994,8 +6120,18 @@ version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
 dependencies = [
- "der",
- "spki",
+ "der 0.6.1",
+ "spki 0.6.0",
 ]
 
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der 0.7.6",
+ "spki 0.7.2",
+]
+
 [[package]]
@@ -6738,11 +6874,21 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb"
 dependencies = [
- "crypto-bigint",
+ "crypto-bigint 0.4.9",
  "hmac 0.12.1",
  "zeroize",
 ]
 
+[[package]]
+name = "rfc6979"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
+dependencies = [
+ "hmac 0.12.1",
+ "subtle",
+]
+
 [[package]]
 name = "ring"
 version = "0.16.20"
@@ -7119,10 +7265,24 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928"
 dependencies = [
- "base16ct",
- "der",
+ "base16ct 0.1.1",
+ "der 0.6.1",
  "generic-array",
- "pkcs8",
+ "pkcs8 0.9.0",
  "subtle",
  "zeroize",
 ]
 
+[[package]]
+name = "sec1"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e"
+dependencies = [
+ "base16ct 0.2.0",
+ "der 0.7.6",
+ "generic-array",
+ "pkcs8 0.10.2",
+ "subtle",
+ "zeroize",
+]
@@ -7411,6 +7571,16 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "signature"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500"
+dependencies = [
+ "digest 0.10.7",
+ "rand_core 0.6.4",
+]
+
 [[package]]
 name = "simple_asn1"
 version = "0.6.2"
@@ -7651,14 +7821,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"
 
 [[package]]
 name = "snow"
-version = "0.9.2"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733"
+checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d"
 dependencies = [
  "aes-gcm 0.9.4",
  "blake2",
  "chacha20poly1305",
- "curve25519-dalek 4.0.0-rc.1",
+ "curve25519-dalek 4.0.0-rc.2",
 "rand_core 0.6.4",
  "ring",
  "rustc_version 0.4.0",
@@ -7715,7 +7885,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b"
 dependencies = [
  "base64ct",
- "der",
+ "der 0.6.1",
 ]
 
+[[package]]
+name = "spki"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+dependencies = [
+ "base64ct",
+ "der 0.7.6",
+]
+
 [[package]]
@@ -9432,9 +9612,9 @@ dependencies = [
 
 [[package]]
 name = "webrtc-dtls"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05"
+checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267"
 dependencies = [
  "aes 0.6.0",
  "aes-gcm 0.10.2",
@@ -9445,29 +9625,28 @@ dependencies = [
  "ccm",
  "curve25519-dalek 3.2.0",
  "der-parser 8.2.0",
- "elliptic-curve",
+ "elliptic-curve 0.12.3",
  "hkdf",
  "hmac 0.12.1",
  "log",
  "oid-registry 0.6.1",
  "p256",
  "p384",
  "rand 0.8.5",
  "rand_core 0.6.4",
- "rcgen 0.9.3",
+ "rcgen 0.10.0",
  "ring",
  "rustls 0.19.1",
- "sec1",
+ "sec1 0.3.0",
  "serde",
  "sha1",
  "sha2 0.10.6",
- "signature",
+ "signature 1.6.4",
  "subtle",
  "thiserror",
  "tokio",
  "webpki 0.21.4",
  "webrtc-util",
- "x25519-dalek 2.0.0-pre.1",
+ "x25519-dalek 2.0.0-rc.2",
  "x509-parser 0.13.2",
 ]
 
@@ -9910,12 +10089,13 @@ dependencies = [
 
 [[package]]
 name = "x25519-dalek"
-version = "2.0.0-pre.1"
+version = "2.0.0-rc.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df"
+checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95"
 dependencies = [
- "curve25519-dalek 3.2.0",
+ "curve25519-dalek 4.0.0-rc.2",
 "rand_core 0.6.4",
  "serde",
  "zeroize",
 ]
Makefile (6 changed lines)

@@ -177,7 +177,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --workspace --tests -- \
+	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \
 		-D clippy::fn_to_numeric_cast_any \
 		-D warnings \
 		-A clippy::uninlined-format-args \
@@ -188,6 +188,10 @@ lint:
 		-A clippy::question-mark \
 		-A clippy::uninlined-format-args
 
+# Lints the code using Clippy and automatically fix some simple compiler warnings.
+lint-fix:
+	EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint
+
 nightly-lint:
 	cp .github/custom/clippy.toml .
 	cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \
beacon_node/beacon_chain/src/beacon_chain.rs

@@ -71,7 +71,6 @@ use execution_layer::{
     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
     PayloadAttributes, PayloadStatus,
 };
-pub use fork_choice::CountUnrealized;
 use fork_choice::{
     AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters,
     InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses,
@@ -2599,7 +2598,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_chain_segment(
         self: &Arc<Self>,
         chain_segment: Vec<BlockWrapper<T::EthSpec>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
         let mut imported_blocks = 0;
@@ -2666,7 +2664,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     .process_block(
                         signature_verified_block.block_root(),
                         signature_verified_block,
-                        count_unrealized,
                         notify_execution_layer,
                     )
                     .await
@@ -2759,13 +2756,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_blob(
         self: &Arc<Self>,
         blob: GossipVerifiedBlob<T::EthSpec>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
-        self.check_availability_and_maybe_import(
-            blob.slot(),
-            |chain| chain.data_availability_checker.put_gossip_blob(blob),
-            count_unrealized,
-        )
+        self.check_availability_and_maybe_import(blob.slot(), |chain| {
+            chain.data_availability_checker.put_gossip_blob(blob)
+        })
         .await
     }
 
@@ -2789,7 +2783,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         block_root: Hash256,
         unverified_block: B,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
@@ -2813,20 +2806,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .map_err(|e| self.handle_block_error(e))?;
 
         match executed_block {
-            ExecutedBlock::Available(block) => {
-                self.import_available_block(Box::new(block), count_unrealized)
-                    .await
-            }
+            ExecutedBlock::Available(block) => self.import_available_block(Box::new(block)).await,
             ExecutedBlock::AvailabilityPending(block) => {
-                self.check_availability_and_maybe_import(
-                    block.block.slot(),
-                    |chain| {
+                self.check_availability_and_maybe_import(block.block.slot(), |chain| {
                     chain
                         .data_availability_checker
                         .put_pending_executed_block(block)
-                    },
-                    count_unrealized,
-                )
+                })
                 .await
             }
         }
@@ -2836,7 +2822,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// get a fully `ExecutedBlock`
     ///
     /// An error is returned if the verification handle couldn't be awaited.
-    async fn into_executed_block(
+    pub async fn into_executed_block(
         self: Arc<Self>,
         execution_pending_block: ExecutionPendingBlock<T>,
     ) -> Result<ExecutedBlock<T::EthSpec>, BlockError<T::EthSpec>> {
@@ -2925,13 +2911,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         slot: Slot,
         cache_fn: impl FnOnce(Arc<Self>) -> Result<Availability<T::EthSpec>, AvailabilityCheckError>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         let availability = cache_fn(self.clone())?;
         match availability {
-            Availability::Available(block) => {
-                self.import_available_block(block, count_unrealized).await
-            }
+            Availability::Available(block) => self.import_available_block(block).await,
             Availability::MissingComponents(block_root) => Ok(
                 AvailabilityProcessingStatus::MissingComponents(slot, block_root),
             ),
@@ -2941,7 +2924,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     async fn import_available_block(
         self: &Arc<Self>,
         block: Box<AvailableExecutedBlock<T::EthSpec>>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         let AvailableExecutedBlock {
             block,
@@ -2971,7 +2953,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 state,
                 confirmed_state_roots,
                 payload_verification_outcome.payload_verification_status,
-                count_unrealized,
                 parent_block,
                 parent_eth1_finalization_data,
                 consensus_context,
@@ -3017,7 +2998,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         mut state: BeaconState<T::EthSpec>,
         confirmed_state_roots: Vec<Hash256>,
         payload_verification_status: PayloadVerificationStatus,
-        count_unrealized: CountUnrealized,
         parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
         parent_eth1_finalization_data: Eth1FinalizationData,
         mut consensus_context: ConsensusContext<T::EthSpec>,
@@ -3088,7 +3068,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     &state,
                     payload_verification_status,
                     &self.spec,
-                    count_unrealized,
                 )
                 .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
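The hunks above are the heart of this merge: the `CountUnrealized` flag is deleted from the block-import path, so unrealized justification is now always counted. A minimal sketch of the resulting call shape — a hypothetical caller for illustration, not an excerpt from the commit; the imports and type parameters are assumed from the diff above:

use std::sync::Arc;

use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{
    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, NotifyExecutionLayer,
};
use types::Hash256;

// Hypothetical helper: after this change, callers pass only the block root,
// the block itself and the execution-layer notification flag. There is no
// per-call decision about counting unrealized justification any more.
async fn import_one_block<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    block_root: Hash256,
    block: BlockWrapper<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
    chain
        .process_block(block_root, block, NotifyExecutionLayer::Yes)
        .await
}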
beacon_node/beacon_chain/src/block_verification.rs

@@ -149,8 +149,6 @@ pub enum BlockError<T: EthSpec> {
     /// It's unclear if this block is valid, but it cannot be processed without already knowing
     /// its parent.
     ParentUnknown(BlockWrapper<T>),
-    /// The block skips too many slots and is a DoS risk.
-    TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
     /// The block slot is greater than the present slot.
     ///
     /// ## Peer scoring
@@ -951,9 +949,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             parent_block.root
         };
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent_block.slot, block.message())?;
-
         // We assign to a variable instead of using `if let Some` directly to ensure we drop the
         // write lock before trying to acquire it again in the `else` clause.
         let proposer_opt = chain
@@ -1110,9 +1105,6 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
 
         let (mut parent, block) = load_parent(block_root, block, chain)?;
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         let state = cheap_state_advance_to_obtain_committees(
             &mut parent.pre_state,
             parent.beacon_state_root,
@@ -1327,9 +1319,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             return Err(BlockError::ParentUnknown(block.into_block_wrapper()));
         }
 
-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         /*
          * Perform cursory checks to see if the block is even worth processing.
          */
@@ -1688,30 +1677,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     }
 }
 
-/// Check that the count of skip slots between the block and its parent does not exceed our maximum
-/// value.
-///
-/// Whilst this is not part of the specification, we include this to help prevent us from DoS
-/// attacks. In times of dire network circumstance, the user can configure the
-/// `import_max_skip_slots` value.
-fn check_block_skip_slots<T: BeaconChainTypes>(
-    chain: &BeaconChain<T>,
-    parent_slot: Slot,
-    block: BeaconBlockRef<'_, T::EthSpec>,
-) -> Result<(), BlockError<T::EthSpec>> {
-    // Reject any block that exceeds our limit on skipped slots.
-    if let Some(max_skip_slots) = chain.config.import_max_skip_slots {
-        if block.slot() > parent_slot + max_skip_slots {
-            return Err(BlockError::TooManySkippedSlots {
-                parent_slot,
-                block_slot: block.slot(),
-            });
-        }
-    }
-
-    Ok(())
-}
-
 /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any).
 fn check_block_against_anchor_slot<T: BeaconChainTypes>(
     block: BeaconBlockRef<'_, T::EthSpec>,
beacon_node/beacon_chain/src/builder.rs

@@ -20,7 +20,7 @@ use crate::{
 };
 use eth1::Config as Eth1Config;
 use execution_layer::ExecutionLayer;
-use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses};
+use fork_choice::{ForkChoice, ResetPayloadStatuses};
 use futures::channel::mpsc::Sender;
 use kzg::{Kzg, TrustedSetup};
 use operation_pool::{OperationPool, PersistedOperationPool};
@@ -701,7 +701,6 @@ where
                 store.clone(),
                 Some(current_slot),
                 &self.spec,
-                CountUnrealized::True,
             )?;
         }
 
beacon_node/beacon_chain/src/chain_config.rs

@@ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24;
 
 #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
 pub struct ChainConfig {
-    /// Maximum number of slots to skip when importing a consensus message (e.g., block,
-    /// attestation, etc).
+    /// Maximum number of slots to skip when importing an attestation.
     ///
     /// If `None`, there is no limit.
    pub import_max_skip_slots: Option<u64>,
beacon_node/beacon_chain/src/fork_revert.rs

@@ -1,5 +1,5 @@
 use crate::{BeaconForkChoiceStore, BeaconSnapshot};
-use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
+use fork_choice::{ForkChoice, PayloadVerificationStatus};
 use itertools::process_results;
 use slog::{info, warn, Logger};
 use state_processing::state_advance::complete_state_advance;
@@ -100,7 +100,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
     store: Arc<HotColdDB<E, Hot, Cold>>,
     current_slot: Option<Slot>,
     spec: &ChainSpec,
-    count_unrealized_config: CountUnrealized,
 ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
     // Fetch finalized block.
     let finalized_checkpoint = head_state.finalized_checkpoint();
@@ -166,8 +165,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
 
     let mut state = finalized_snapshot.beacon_state;
-    let blocks_len = blocks.len();
-    for (i, block) in blocks.into_iter().enumerate() {
+    for block in blocks {
         complete_state_advance(&mut state, None, block.slot(), spec)
             .map_err(|e| format!("State advance failed: {:?}", e))?;
 
@@ -190,15 +188,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         // This scenario is so rare that it seems OK to double-verify some blocks.
         let payload_verification_status = PayloadVerificationStatus::Optimistic;
 
-        // Because we are replaying a single chain of blocks, we only need to calculate unrealized
-        // justification for the last block in the chain.
-        let is_last_block = i + 1 == blocks_len;
-        let count_unrealized = if is_last_block {
-            count_unrealized_config
-        } else {
-            CountUnrealized::False
-        };
-
         fork_choice
             .on_block(
                 block.slot(),
@@ -209,7 +198,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
                 &state,
                 payload_verification_status,
                 spec,
-                count_unrealized,
             )
             .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
     }
beacon_node/beacon_chain/src/lib.rs

@@ -57,7 +57,7 @@ pub mod validator_pubkey_cache;
 
 pub use self::beacon_chain::{
     AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes,
-    BeaconStore, ChainSegmentResult, CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate,
+    BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate,
     ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
beacon_node/beacon_chain/src/test_utils.rs

@@ -24,7 +24,6 @@ use execution_layer::{
     },
     ExecutionLayer,
 };
-use fork_choice::CountUnrealized;
 use futures::channel::mpsc::Receiver;
 pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
@@ -1758,12 +1757,7 @@ where
         self.set_current_slot(slot);
         let block_hash: SignedBeaconBlockHash = self
             .chain
-            .process_block(
-                block_root,
-                block.into(),
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, block.into(), NotifyExecutionLayer::Yes)
             .await?
             .try_into()
             .unwrap();
@@ -1781,7 +1775,6 @@ where
             .process_block(
                 wrapped_block.canonical_root(),
                 wrapped_block,
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await?
beacon_node/beacon_chain/tests/block_verification.rs

@@ -5,8 +5,9 @@ use beacon_chain::{
     blob_verification::AsBlock,
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
 };
-use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
-use fork_choice::CountUnrealized;
+use beacon_chain::{
+    BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer,
+};
 use lazy_static::lazy_static;
 use logging::test_logger;
 use slasher::{Config as SlasherConfig, Slasher};
@@ -153,18 +154,14 @@ async fn chain_segment_full_segment() {
     // Sneak in a little check to ensure we can process empty chain segments.
     harness
         .chain
-        .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes)
+        .process_chain_segment(vec![], NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import empty chain segment");
 
     harness
         .chain
-        .process_chain_segment(
-            blocks.clone(),
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes)
        .await
        .into_block_error()
        .expect("should import chain segment");
@@ -196,11 +193,7 @@ async fn chain_segment_varying_chunk_size() {
     for chunk in blocks.chunks(*chunk_size) {
         harness
             .chain
-            .process_chain_segment(
-                chunk.to_vec(),
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes)
             .await
             .into_block_error()
             .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@@ -239,7 +232,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -263,7 +256,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -297,7 +290,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -321,7 +314,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -347,7 +340,7 @@ async fn assert_invalid_signature(
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -369,11 +362,7 @@ async fn assert_invalid_signature(
     // imported prior to this test.
     let _ = harness
         .chain
-        .process_chain_segment(
-            ancestor_blocks,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
        .await;
     harness.chain.recompute_head_at_current_slot().await;
 
@@ -382,7 +371,6 @@ async fn assert_invalid_signature(
         .process_block(
             snapshots[block_index].beacon_block.canonical_root(),
             snapshots[block_index].beacon_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await;
@@ -435,11 +423,7 @@ async fn invalid_signature_gossip_block() {
         .collect();
     harness
         .chain
-        .process_chain_segment(
-            ancestor_blocks,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
        .await
        .into_block_error()
        .expect("should import all blocks prior to the one being tested");
@@ -451,7 +435,6 @@ async fn invalid_signature_gossip_block() {
         .process_block(
             signed_block.canonical_root(),
             Arc::new(signed_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await,
@@ -486,7 +469,7 @@ async fn invalid_signature_block_proposal() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -684,7 +667,7 @@ async fn invalid_signature_deposit() {
         !matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -764,7 +747,6 @@ async fn block_gossip_verification() {
             .process_block(
                 gossip_verified.block_root,
                 gossip_verified,
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await
@@ -1039,7 +1021,6 @@ async fn verify_block_for_gossip_slashing_detection() {
         .process_block(
             verified_block.block_root,
             verified_block,
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1079,7 +1060,6 @@ async fn verify_block_for_gossip_doppelganger_detection() {
         .process_block(
             verified_block.block_root,
             verified_block,
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1227,7 +1207,6 @@ async fn add_base_block_to_altair_chain() {
             .process_block(
                 base_block.canonical_root(),
                 Arc::new(base_block.clone()),
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await
@@ -1243,11 +1222,7 @@ async fn add_base_block_to_altair_chain() {
     assert!(matches!(
         harness
             .chain
-            .process_chain_segment(
-                vec![Arc::new(base_block).into()],
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_chain_segment(vec![Arc::new(base_block).into()], NotifyExecutionLayer::Yes,)
             .await,
         ChainSegmentResult::Failed {
             imported_blocks: 0,
@@ -1366,7 +1341,6 @@ async fn add_altair_block_to_base_chain() {
            .process_block(
                altair_block.canonical_root(),
                Arc::new(altair_block.clone()),
-                CountUnrealized::True,
                NotifyExecutionLayer::Yes,
            )
            .await
@@ -1384,7 +1358,6 @@ async fn add_altair_block_to_base_chain() {
             .chain
             .process_chain_segment(
                 vec![Arc::new(altair_block).into()],
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes
             )
             .await,
@@ -1397,3 +1370,100 @@ async fn add_altair_block_to_base_chain() {
         }
     ));
 }
+
+#[tokio::test]
+async fn import_duplicate_block_unrealized_justification() {
+    let spec = MainnetEthSpec::default_spec();
+
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .spec(spec)
+        .keypairs(KEYPAIRS[..].to_vec())
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .build();
+    let chain = &harness.chain;
+
+    // Move out of the genesis slot.
+    harness.advance_slot();
+
+    // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2.
+    let num_slots = E::slots_per_epoch() as usize * 8 / 3;
+    harness
+        .extend_chain(
+            num_slots,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    // Move into the next empty slot.
+    harness.advance_slot();
+
+    // The store's justified checkpoint must still be at epoch 0, while unrealized justification
+    // must be at epoch 1.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
+    drop(fc);
+
+    // Produce a block to justify epoch 2.
+    let state = harness.get_current_state();
+    let slot = harness.get_current_slot();
+    let (block, _) = harness.make_block(state.clone(), slot).await;
+    let block = Arc::new(block);
+    let block_root = block.canonical_root();
+
+    // Create two verified variants of the block, representing the same block being processed in
+    // parallel.
+    let notify_execution_layer = NotifyExecutionLayer::Yes;
+    let verified_block1 = block
+        .clone()
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+    let verified_block2 = block
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+
+    // Import the first block, simulating a block processed via a finalized chain segment.
+    chain
+        .clone()
+        .import_execution_pending_block(verified_block1)
+        .await
+        .unwrap();
+
+    // Unrealized justification should NOT have updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    let unrealized_justification = fc.unrealized_justified_checkpoint();
+    assert_eq!(unrealized_justification.epoch, 2);
+
+    // The fork choice node for the block should have unrealized justification.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+    drop(fc);
+
+    // Import the second verified block, simulating a block processed via RPC.
+    chain
+        .clone()
+        .import_execution_pending_block(verified_block2)
+        .await
+        .unwrap();
+
+    // Unrealized justification should still be updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(
+        fc.unrealized_justified_checkpoint(),
+        unrealized_justification
+    );
+
+    // The fork choice node for the block should still have the unrealized justified checkpoint.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+}
beacon_node/beacon_chain/tests/payload_invalidation.rs

@@ -17,9 +17,7 @@ use execution_layer::{
     test_utils::ExecutionBlockGenerator,
     ExecutionLayer, ForkchoiceState, PayloadAttributes,
 };
-use fork_choice::{
-    CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
-};
+use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus};
 use logging::test_logger;
 use proto_array::{Error as ProtoArrayError, ExecutionStatus};
 use slot_clock::SlotClock;
@@ -699,7 +697,6 @@ async fn invalidates_all_descendants() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -799,7 +796,6 @@ async fn switches_heads() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1056,7 +1052,7 @@ async fn invalid_parent() {
 
     // Ensure the block built atop an invalid payload is invalid for import.
     assert!(matches!(
-        rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await,
+        rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));
@@ -1071,7 +1067,7 @@ async fn invalid_parent() {
             &state,
             PayloadVerificationStatus::Optimistic,
             &rig.harness.chain.spec,
-            CountUnrealized::True,
+
         ),
         Err(ForkChoiceError::ProtoArrayStringError(message))
         if message.contains(&format!(
@@ -1342,12 +1338,7 @@ async fn build_optimistic_chain(
     for block in blocks {
         rig.harness
             .chain
-            .process_block(
-                block.canonical_root(),
-                block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes)
            .await
            .unwrap();
     }
@@ -1906,7 +1897,6 @@ async fn recover_from_invalid_head_by_importing_blocks() {
         .process_block(
             fork_block.canonical_root(),
             fork_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
beacon_node/beacon_chain/tests/store_tests.rs

@@ -13,7 +13,6 @@ use beacon_chain::{
     ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
 };
 use eth2_network_config::TRUSTED_SETUP;
-use fork_choice::CountUnrealized;
 use kzg::TrustedSetup;
 use lazy_static::lazy_static;
 use logging::test_logger;
@@ -2165,7 +2164,6 @@ async fn weak_subjectivity_sync() {
         .process_block(
             full_block.canonical_root(),
             Arc::new(full_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
beacon_node/beacon_chain/tests/tests.rs

@@ -8,7 +8,6 @@ use beacon_chain::{
     },
     BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
 };
-use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use operation_pool::PersistedOperationPool;
 use state_processing::{
@@ -686,7 +685,6 @@ async fn run_skip_slot_test(skip_slots: u64) {
         .process_block(
             harness_a.chain.head_snapshot().beacon_block_root,
             harness_a.chain.head_snapshot().beacon_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
beacon_node/http_api/src/publish_blocks.rs

@@ -3,7 +3,7 @@ use crate::metrics;
 use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
 use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer};
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
+use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
 use eth2::types::SignedBlockContents;
 use execution_layer::ProvenancedPayload;
 use lighthouse_network::PubsubMessage;
@@ -94,12 +94,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
     let slot = block_clone.message().slot();
     let proposer_index = block_clone.message().proposer_index();
     match chain
-        .process_block(
-            block_root,
-            wrapped_block,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_block(block_root, wrapped_block, NotifyExecutionLayer::Yes)
         .await
     {
         Ok(AvailabilityProcessingStatus::Imported(root)) => {
beacon_node/lighthouse_network/Cargo.toml

@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 
 [dependencies]
-discv5 = { version = "0.2.2", features = ["libp2p"] }
+discv5 = { version = "0.3.0", features = ["libp2p"]}
 unsigned-varint = { version = "0.6.0", features = ["codec"] }
 types = { path = "../../consensus/types" }
 ssz_types = "0.5.0"
beacon_node/lighthouse_network/src/config.rs

@@ -163,7 +163,7 @@ impl Config {
             udp_port,
             tcp_port,
         });
-        self.discv5_config.ip_mode = discv5::IpMode::Ip4;
+        self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port);
         self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4)
     }
 
@@ -176,9 +176,8 @@ impl Config {
             udp_port,
             tcp_port,
         });
-        self.discv5_config.ip_mode = discv5::IpMode::Ip6 {
-            enable_mapped_addresses: false,
-        };
+
+        self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port);
         self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6)
     }
 
@@ -206,10 +205,10 @@ impl Config {
                 tcp_port: tcp6_port,
             },
         );
+        self.discv5_config.listen_config = discv5::ListenConfig::default()
+            .with_ipv4(v4_addr, udp4_port)
+            .with_ipv6(v6_addr, udp6_port);
 
-        self.discv5_config.ip_mode = discv5::IpMode::Ip6 {
-            enable_mapped_addresses: true,
-        };
         self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) {
             (None, None) => false,
             (None, Some(ip6)) => is_global_ipv6(ip6),
@@ -279,9 +278,17 @@ impl Default for Config {
                 .build()
                 .expect("The total rate limit has been specified"),
         );
+        let listen_addresses = ListenAddress::V4(ListenAddr {
+            addr: Ipv4Addr::UNSPECIFIED,
+            udp_port: 9000,
+            tcp_port: 9000,
+        });
+
+        let discv5_listen_config =
+            discv5::ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000);
+
         // discv5 configuration
-        let discv5_config = Discv5ConfigBuilder::new()
+        let discv5_config = Discv5ConfigBuilder::new(discv5_listen_config)
             .enable_packet_filter()
             .session_cache_capacity(5000)
             .request_timeout(Duration::from_secs(1))
@@ -304,12 +311,9 @@ impl Default for Config {
         // NOTE: Some of these get overridden by the corresponding CLI default values.
         Config {
             network_dir,
-            listen_addresses: ListenAddress::V4(ListenAddr {
-                addr: Ipv4Addr::UNSPECIFIED,
-                udp_port: 9000,
-                tcp_port: 9000,
-            }),
+            listen_addresses,
             enr_address: (None, None),
 
             enr_udp4_port: None,
             enr_tcp4_port: None,
             enr_udp6_port: None,
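These hunks track the discv5 0.2 → 0.3 migration: the listen socket moves out of `IpMode` (and out of `Discv5::start(socket)`) and into a `ListenConfig` supplied when the config is built. A minimal sketch of the new style, assembled from the calls shown above — the function name and the port/timeout values are illustrative, not from the commit:

use std::net::Ipv4Addr;
use std::time::Duration;

use discv5::{Discv5Config, Discv5ConfigBuilder, ListenConfig};

// Build a discv5 0.3 config: the UDP listen address is part of the config
// itself, so `Discv5::start()` later takes no socket argument (see the
// discovery/mod.rs hunk further below).
fn example_discv5_config() -> Discv5Config {
    let listen_config = ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000);
    Discv5ConfigBuilder::new(listen_config)
        .request_timeout(Duration::from_secs(1))
        .build()
}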
beacon_node/lighthouse_network/src/discovery/enr.rs

@@ -213,12 +213,16 @@ pub fn build_enr<T: EthSpec>(
 fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
     // take preference over disk_enr address if one is not specified
     (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4())
+        &&
+    (local_enr.ip6().is_none() || local_enr.ip6() == disk_enr.ip6())
         // tcp ports must match
         && local_enr.tcp4() == disk_enr.tcp4()
+        && local_enr.tcp6() == disk_enr.tcp6()
         // must match on the same fork
         && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY)
         // take preference over disk udp port if one is not specified
         && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4())
+        && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6())
         // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match,
         // otherwise we use a new ENR. This will likely only be true for non-validating nodes
         && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY)
@@ -198,7 +198,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
fn as_peer_id(&self) -> PeerId {
match self {
Self::Secp256k1(pk) => {
let pk_bytes = pk.to_bytes();
let pk_bytes = pk.to_sec1_bytes();
let libp2p_pk = libp2p::core::PublicKey::Secp256k1(
libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes)
.expect("valid public key"),
@@ -222,14 +222,16 @@ impl CombinedKeyExt for CombinedKey {
match key {
Keypair::Secp256k1(key) => {
let secret =
discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
Keypair::Ed25519(key) => {
let ed_keypair =
discv5::enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32])
.expect("libp2p key must be valid");
let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
&(key.encode()[..32])
.try_into()
.expect("libp2p key must be valid"),
);
Ok(CombinedKey::from(ed_keypair))
}
Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
@@ -281,7 +283,7 @@ mod tests {
fn test_secp256k1_peer_id_conversion() {
let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
let sk_bytes = hex::decode(sk_hex).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap();

let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();
@@ -300,16 +302,18 @@ mod tests {
fn test_ed25519_peer_conversion() {
let sk_hex = "4dea8a5072119927e9d243a7d953f2f4bc95b70f110978e2f9bc7a9000e4b261";
let sk_bytes = hex::decode(sk_hex).unwrap();
let secret = discv5::enr::ed25519_dalek::SecretKey::from_bytes(&sk_bytes).unwrap();
let public = discv5::enr::ed25519_dalek::PublicKey::from(&secret);
let keypair = discv5::enr::ed25519_dalek::Keypair { secret, public };
let secret_key = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
&sk_bytes.clone().try_into().unwrap(),
);

let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap();
let ed25519_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Ed25519(ed25519_kp);
let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Ed25519(secp256k1_kp);
let peer_id = libp2p_kp.public().to_peer_id();

let enr = discv5::enr::EnrBuilder::new("v4").build(&keypair).unwrap();
let enr = discv5::enr::EnrBuilder::new("v4")
.build(&secret_key)
.unwrap();
let node_id = peer_id_to_node_id(&peer_id).unwrap();

assert_eq!(enr.node_id(), node_id);

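The key-handling changes above follow upstream API moves in `k256` and `ed25519-dalek` (as implied by the new calls; the exact crate versions are an assumption): `SigningKey::from_bytes` now wants typed, fixed-size input, so raw slices go through `from_slice` or a `try_into` conversion. A condensed sketch:

fn discv5_keys_from_raw(sk_bytes: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    // k256 (secp256k1): `from_bytes` takes `&FieldBytes`; arbitrary slices use `from_slice`.
    let _secp = discv5::enr::k256::ecdsa::SigningKey::from_slice(sk_bytes)?;
    // ed25519-dalek 2.x: the SecretKey/PublicKey/Keypair trio collapses into
    // `SigningKey`, constructed from a fixed `[u8; 32]`.
    let arr: [u8; 32] = sk_bytes.try_into()?;
    let _ed = discv5::enr::ed25519_dalek::SigningKey::from_bytes(&arr);
    Ok(())
}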
@@ -209,13 +209,6 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(),
"ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()
);
let listen_socket = match config.listen_addrs() {
crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(),
crate::listen_addr::ListenAddress::V6(v6_addr) => v6_addr.udp_socket_addr(),
crate::listen_addr::ListenAddress::DualStack(_v4_addr, v6_addr) => {
v6_addr.udp_socket_addr()
}
};

// convert the keypair into an ENR key
let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?;
@@ -251,10 +244,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

// Start the discv5 service and obtain an event stream
let event_stream = if !config.disable_discovery {
discv5
.start(listen_socket)
.map_err(|e| e.to_string())
.await?;
discv5.start().map_err(|e| e.to_string()).await?;
debug!(log, "Discovery service started");
EventStream::Awaiting(Box::pin(discv5.event_stream()))
} else {
@@ -413,7 +403,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
/// If the external address needs to be modified, use `update_enr_udp_socket`.
pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> {
self.discv5
.enr_insert("tcp", &port.to_be_bytes())
.enr_insert("tcp", &port)
.map_err(|e| format!("{:?}", e))?;

// replace the global version
@@ -428,29 +418,12 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
/// Use this with caution. Discovery should automatically maintain this. This should only be
/// used when automatic discovery is disabled.
pub fn update_enr_udp_socket(&mut self, socket_addr: SocketAddr) -> Result<(), String> {
match socket_addr {
SocketAddr::V4(socket) => {
self.discv5
.enr_insert("ip", &socket.ip().octets())
.map_err(|e| format!("{:?}", e))?;
self.discv5
.enr_insert("udp", &socket.port().to_be_bytes())
.map_err(|e| format!("{:?}", e))?;
}
SocketAddr::V6(socket) => {
self.discv5
.enr_insert("ip6", &socket.ip().octets())
.map_err(|e| format!("{:?}", e))?;
self.discv5
.enr_insert("udp6", &socket.port().to_be_bytes())
.map_err(|e| format!("{:?}", e))?;
}
}

// replace the global version
*self.network_globals.local_enr.write() = self.discv5.local_enr();
const IS_TCP: bool = false;
if self.discv5.update_local_enr_socket(socket_addr, IS_TCP) {
// persist modified enr to disk
enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log);
}
*self.network_globals.local_enr.write() = self.discv5.local_enr();
Ok(())
}


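Two details in the discovery hunks are easy to miss. First, the newer enr crate encodes inserted values itself, which is why the port is now passed as a plain `u16` rather than pre-converted big-endian bytes. Second, discv5 now owns the `ip`/`udp` ENR fields, so the manual `enr_insert` calls for the socket give way to `update_local_enr_socket`. A brief sketch (the encodable-value bound on `enr_insert` is an assumption from the call sites):

// Old: self.discv5.enr_insert("tcp", &port.to_be_bytes())
// New: the value is encoded by the enr crate itself.
self.discv5
    .enr_insert("tcp", &port)
    .map_err(|e| format!("{:?}", e))?;

// Socket updates go through discv5, which reports whether the ENR changed,
// so the caller only persists to disk on an actual change.
let changed = self.discv5.update_local_enr_socket(socket_addr, /* is_tcp: */ false);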
@@ -217,8 +217,7 @@ mod tests {
let mut buf = BytesMut::new();
buf.extend_from_slice(&message);

let snappy_protocol_id =
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);

let fork_context = Arc::new(fork_context(ForkName::Base));
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
@@ -252,8 +251,7 @@ mod tests {
// Insert length-prefix
uvi_codec.encode(len, &mut dst).unwrap();

let snappy_protocol_id =
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);

let fork_context = Arc::new(fork_context(ForkName::Base));
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
@@ -280,8 +278,7 @@ mod tests {
dst
}

let protocol_id =
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy);
let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy);

// Response limits
let fork_context = Arc::new(fork_context(ForkName::Base));
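From here on, every `(Protocol, Version)` pair collapses into a single `SupportedProtocol` value, and `ProtocolId::new` drops its version argument. The enum's definition lives in `protocol.rs` and is not part of this diff; a plausible sketch assembled from the variants and the `.protocol()` calls used below:

// Sketch: variant names are taken from usages in this diff; the real
// definition (and any derives) lives in protocol.rs.
pub enum SupportedProtocol {
    StatusV1,
    GoodbyeV1,
    BlocksByRangeV1,
    BlocksByRangeV2,
    BlocksByRootV1,
    BlocksByRootV2,
    BlobsByRangeV1,
    BlobsByRootV1,
    PingV1,
    MetaDataV1,
    MetaDataV2,
    LightClientBootstrapV1,
}

impl SupportedProtocol {
    // Maps back to the coarse `Protocol` used for rate limiting and logging.
    pub fn protocol(&self) -> Protocol {
        match self {
            Self::StatusV1 => Protocol::Status,
            Self::GoodbyeV1 => Protocol::Goodbye,
            Self::BlocksByRangeV1 | Self::BlocksByRangeV2 => Protocol::BlocksByRange,
            Self::BlocksByRootV1 | Self::BlocksByRootV2 => Protocol::BlocksByRoot,
            Self::BlobsByRangeV1 => Protocol::BlobsByRange,
            Self::BlobsByRootV1 => Protocol::BlobsByRoot,
            Self::PingV1 => Protocol::Ping,
            Self::MetaDataV1 | Self::MetaDataV2 => Protocol::MetaData,
            Self::LightClientBootstrapV1 => Protocol::LightClientBootstrap,
        }
    }
}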
@@ -1,9 +1,9 @@
use crate::rpc::methods::*;
use crate::rpc::{
codec::base::OutboundCodec,
protocol::{Encoding, Protocol, ProtocolId, RPCError, Version, ERROR_TYPE_MAX, ERROR_TYPE_MIN},
protocol::{Encoding, ProtocolId, RPCError, SupportedProtocol, ERROR_TYPE_MAX, ERROR_TYPE_MIN},
};
use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse};
use crate::{rpc::methods::*, EnrSyncCommitteeBitfield};
use libp2p::bytes::BytesMut;
use snap::read::FrameDecoder;
use snap::write::FrameEncoder;
@@ -79,27 +79,14 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
RPCResponse::MetaData(res) =>
// Encode the correct version of the MetaData response based on the negotiated version.
{
match self.protocol.version {
Version::V1 => MetaData::<TSpec>::V1(MetaDataV1 {
seq_number: *res.seq_number(),
attnets: res.attnets().clone(),
})
.as_ssz_bytes(),
Version::V2 => {
// `res` is of type MetaDataV2, return the ssz bytes
if res.syncnets().is_ok() {
res.as_ssz_bytes()
} else {
// `res` is of type MetaDataV1, create a MetaDataV2 by adding a default syncnets field
// Note: This code path is redundant as `res` would be always of type MetaDataV2
MetaData::<TSpec>::V2(MetaDataV2 {
seq_number: *res.seq_number(),
attnets: res.attnets().clone(),
syncnets: EnrSyncCommitteeBitfield::<TSpec>::default(),
})
.as_ssz_bytes()
}
}
match self.protocol.versioned_protocol {
SupportedProtocol::MetaDataV1 => res.metadata_v1().as_ssz_bytes(),
// We always send V2 metadata responses from the behaviour
// No change required.
SupportedProtocol::MetaDataV2 => res.metadata_v2().as_ssz_bytes(),
_ => unreachable!(
"We only send metadata responses on negotiating metadata requests"
),
}
}
},
@@ -142,8 +129,11 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
type Error = RPCError;

fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.protocol.message_name == Protocol::MetaData {
return Ok(Some(InboundRequest::MetaData(PhantomData)));
if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV1 {
return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1())));
}
if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 {
return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2())));
}
let length = match handle_length(&mut self.inner, &mut self.len, src)? {
Some(len) => len,
@@ -155,8 +145,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
let ssz_limits = self.protocol.rpc_request_limits();
if ssz_limits.is_out_of_bounds(length, self.max_packet_size) {
return Err(RPCError::InvalidData(format!(
"RPC request length is out of bounds, length {}",
length
"RPC request length for protocol {:?} is out of bounds, length {}",
self.protocol.versioned_protocol, length
)));
}
// Calculate worst case compression length for given uncompressed length
@@ -173,11 +163,7 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
let n = reader.get_ref().get_ref().position();
self.len = None;
let _read_bytes = src.split_to(n as usize);

match self.protocol.version {
Version::V1 => handle_v1_request(self.protocol.message_name, &decoded_buffer),
Version::V2 => handle_v2_request(self.protocol.message_name, &decoded_buffer),
}
handle_rpc_request(self.protocol.versioned_protocol, &decoded_buffer)
}
Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len),
}
@@ -231,13 +217,18 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
let bytes = match item {
OutboundRequest::Status(req) => req.as_ssz_bytes(),
OutboundRequest::Goodbye(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(),
OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
OutboundRequest::BlocksByRange(r) => match r {
OldBlocksByRangeRequest::V1(req) => req.as_ssz_bytes(),
OldBlocksByRangeRequest::V2(req) => req.as_ssz_bytes(),
},
OutboundRequest::BlocksByRoot(r) => match r {
BlocksByRootRequest::V1(req) => req.block_roots.as_ssz_bytes(),
BlocksByRootRequest::V2(req) => req.block_roots.as_ssz_bytes(),
},
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(),
OutboundRequest::Ping(req) => req.as_ssz_bytes(),
OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(),
};
// SSZ encoded bytes should be within `max_packet_size`
if bytes.len() > self.max_packet_size {
@@ -316,19 +307,10 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
let n = reader.get_ref().get_ref().position();
self.len = None;
let _read_bytes = src.split_to(n as usize);

match self.protocol.version {
Version::V1 => handle_v1_response(
self.protocol.message_name,
&decoded_buffer,
&mut self.fork_name,
),
Version::V2 => handle_v2_response(
self.protocol.message_name,
&decoded_buffer,
&mut self.fork_name,
),
}
// Safe to `take` from `self.fork_name` as we have all the bytes we need to
// decode an ssz object at this point.
let fork_name = &mut self.fork_name.take();
handle_rpc_response(self.protocol.versioned_protocol, &decoded_buffer, fork_name)
}
Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len),
}
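Context for the `fork_name` plumbing above: V2 (and blob) responses are prefixed with a 4-byte fork digest, which the codec resolves to a `ForkName` before the SSZ payload arrives and then `take()`s exactly once per response chunk. A sketch of the digest round-trip; `to_context_bytes` appears in the tests below, while `from_context_bytes` is the assumed inverse on `ForkContext`:

// Sender side: prefix each response chunk with the digest of its fork.
let digest: [u8; 4] = fork_context.to_context_bytes(ForkName::Altair).unwrap();

// Receiver side: map the digest back to a fork so the right SSZ block type
// is decoded. An unknown digest is a protocol error (see the "context bytes
// which do not correspond to any fork" test below).
let fork_name = fork_context.from_context_bytes(digest); // assumed Option-returning lookup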
@@ -472,111 +454,106 @@ fn handle_length(
}
}

/// Decodes a `Version::V1` `InboundRequest` from the byte stream.
/// Decodes an `InboundRequest` from the byte stream.
/// `decoded_buffer` should be an ssz-encoded bytestream with
// length = length-prefix received in the beginning of the stream.
fn handle_v1_request<T: EthSpec>(
protocol: Protocol,
fn handle_rpc_request<T: EthSpec>(
versioned_protocol: SupportedProtocol,
decoded_buffer: &[u8],
) -> Result<Option<InboundRequest<T>>, RPCError> {
match protocol {
Protocol::Status => Ok(Some(InboundRequest::Status(StatusMessage::from_ssz_bytes(
decoded_buffer,
)?))),
Protocol::Goodbye => Ok(Some(InboundRequest::Goodbye(
match versioned_protocol {
SupportedProtocol::StatusV1 => Ok(Some(InboundRequest::Status(
StatusMessage::from_ssz_bytes(decoded_buffer)?,
))),
SupportedProtocol::GoodbyeV1 => Ok(Some(InboundRequest::Goodbye(
GoodbyeReason::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange(
OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?,
SupportedProtocol::BlocksByRangeV2 => Ok(Some(InboundRequest::BlocksByRange(
OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2::from_ssz_bytes(decoded_buffer)?),
))),
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
SupportedProtocol::BlocksByRangeV1 => Ok(Some(InboundRequest::BlocksByRange(
OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1::from_ssz_bytes(decoded_buffer)?),
))),
SupportedProtocol::BlocksByRootV2 => Ok(Some(InboundRequest::BlocksByRoot(
BlocksByRootRequest::V2(BlocksByRootRequestV2 {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
}),
))),
SupportedProtocol::BlocksByRootV1 => Ok(Some(InboundRequest::BlocksByRoot(
BlocksByRootRequest::V1(BlocksByRootRequestV1 {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}),
))),
SupportedProtocol::BlobsByRangeV1 => Ok(Some(InboundRequest::BlobsByRange(
BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::BlobsByRoot => Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest {
SupportedProtocol::BlobsByRootV1 => {
Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest {
blob_ids: VariableList::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
})))
}
SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap(
LightClientBootstrapRequest {
SupportedProtocol::LightClientBootstrapV1 => Ok(Some(
InboundRequest::LightClientBootstrap(LightClientBootstrapRequest {
root: Hash256::from_ssz_bytes(decoded_buffer)?,
},
))),
}),
)),
// MetaData requests return early from InboundUpgrade and do not reach the decoder.
// Handle this case just for completeness.
Protocol::MetaData => {
SupportedProtocol::MetaDataV2 => {
if !decoded_buffer.is_empty() {
Err(RPCError::InternalError(
"Metadata requests shouldn't reach decoder",
))
} else {
Ok(Some(InboundRequest::MetaData(PhantomData)))
Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2())))
}
}
}
}

/// Decodes a `Version::V2` `InboundRequest` from the byte stream.
/// `decoded_buffer` should be an ssz-encoded bytestream with
// length = length-prefix received in the beginning of the stream.
fn handle_v2_request<T: EthSpec>(
protocol: Protocol,
decoded_buffer: &[u8],
) -> Result<Option<InboundRequest<T>>, RPCError> {
match protocol {
Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange(
OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
}))),
// MetaData requests return early from InboundUpgrade and do not reach the decoder.
// Handle this case just for completeness.
Protocol::MetaData => {
SupportedProtocol::MetaDataV1 => {
if !decoded_buffer.is_empty() {
Err(RPCError::InvalidData("Metadata request".to_string()))
} else {
Ok(Some(InboundRequest::MetaData(PhantomData)))
Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v1())))
}
}
_ => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
format!("{} does not support version 2", protocol),
)),
}
}

/// Decodes a `Version::V1` `RPCResponse` from the byte stream.
/// Decodes a `RPCResponse` from the byte stream.
/// `decoded_buffer` should be an ssz-encoded bytestream with
// length = length-prefix received in the beginning of the stream.
fn handle_v1_response<T: EthSpec>(
protocol: Protocol,
/// length = length-prefix received in the beginning of the stream.
///
/// For BlocksByRange/BlocksByRoot responses, decodes the appropriate response
/// according to the received `ForkName`.
fn handle_rpc_response<T: EthSpec>(
versioned_protocol: SupportedProtocol,
decoded_buffer: &[u8],
fork_name: &mut Option<ForkName>,
) -> Result<Option<RPCResponse<T>>, RPCError> {
match protocol {
Protocol::Status => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
decoded_buffer,
)?))),
match versioned_protocol {
SupportedProtocol::StatusV1 => Ok(Some(RPCResponse::Status(
StatusMessage::from_ssz_bytes(decoded_buffer)?,
))),
// This case should be unreachable as `Goodbye` has no response.
Protocol::Goodbye => Err(RPCError::InvalidData(
SupportedProtocol::GoodbyeV1 => Err(RPCError::InvalidData(
"Goodbye RPC message has no valid response".to_string(),
)),
Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SupportedProtocol::BlocksByRangeV1 => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SupportedProtocol::BlocksByRootV1 => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
Protocol::BlobsByRange => {
SupportedProtocol::BlobsByRangeV1 => {
let fork_name = fork_name.take().ok_or_else(|| {
RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
format!("No context bytes provided for {} response", protocol),
format!(
"No context bytes provided for {:?} response",
versioned_protocol
),
)
})?;
match fork_name {
@@ -589,11 +566,14 @@ fn handle_v1_response<T: EthSpec>(
)),
}
}
Protocol::BlobsByRoot => {
SupportedProtocol::BlobsByRootV1 => {
let fork_name = fork_name.take().ok_or_else(|| {
RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
format!("No context bytes provided for {} response", protocol),
format!(
"No context bytes provided for {:?} response",
versioned_protocol
),
)
})?;
match fork_name {
@@ -606,104 +586,72 @@ fn handle_v1_response<T: EthSpec>(
)),
}
}
Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(decoded_buffer)?,
}))),
Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1(
SupportedProtocol::MetaDataV1 => Ok(Some(RPCResponse::MetaData(MetaData::V1(
MetaDataV1::from_ssz_bytes(decoded_buffer)?,
)))),
Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap(
SupportedProtocol::LightClientBootstrapV1 => Ok(Some(RPCResponse::LightClientBootstrap(
LightClientBootstrap::from_ssz_bytes(decoded_buffer)?,
))),
}
}

/// Decodes a `Version::V2` `RPCResponse` from the byte stream.
/// `decoded_buffer` should be an ssz-encoded bytestream with
// length = length-prefix received in the beginning of the stream.
///
/// For BlocksByRange/BlocksByRoot responses, decodes the appropriate response
/// according to the received `ForkName`.
fn handle_v2_response<T: EthSpec>(
protocol: Protocol,
decoded_buffer: &[u8],
fork_name: &mut Option<ForkName>,
) -> Result<Option<RPCResponse<T>>, RPCError> {
// MetaData does not contain context_bytes
if let Protocol::MetaData = protocol {
Ok(Some(RPCResponse::MetaData(MetaData::V2(
// MetaData V2 responses have no context bytes, so behave similarly to V1 responses
SupportedProtocol::MetaDataV2 => Ok(Some(RPCResponse::MetaData(MetaData::V2(
MetaDataV2::from_ssz_bytes(decoded_buffer)?,
))))
} else {
let fork_name = fork_name.take().ok_or_else(|| {
RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
format!("No context bytes provided for {} response", protocol),
)
})?;
match protocol {
Protocol::BlocksByRange => match fork_name {
ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
decoded_buffer,
)?),
)))),
SupportedProtocol::BlocksByRangeV2 => match fork_name {
Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?),
)))),

ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(
decoded_buffer,
)?),
Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?),
)))),
ForkName::Capella => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Deneb => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(
decoded_buffer,
)?),
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRange(Arc::new(
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?),
)))),
},
Protocol::BlocksByRoot => match fork_name {
ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Capella => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes(
decoded_buffer,
)?),
)))),
ForkName::Deneb => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(
decoded_buffer,
)?),
)))),
},
Protocol::BlobsByRange => {
Err(RPCError::InvalidData("blobs by range via v2".to_string()))
}
Protocol::BlobsByRoot => {
Err(RPCError::InvalidData("blobs by range via v2".to_string()))
}
_ => Err(RPCError::ErrorResponse(
None => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
"Invalid v2 request".to_string(),
format!(
"No context bytes provided for {:?} response",
versioned_protocol
),
)),
}
},
SupportedProtocol::BlocksByRootV2 => match fork_name {
Some(ForkName::Altair) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(decoded_buffer)?),
)))),
Some(ForkName::Base) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
)))),
Some(ForkName::Merge) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes(decoded_buffer)?),
)))),
Some(ForkName::Capella) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Capella(SignedBeaconBlockCapella::from_ssz_bytes(
decoded_buffer,
)?),
)))),
Some(ForkName::Deneb) => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb::from_ssz_bytes(decoded_buffer)?),
)))),
None => Err(RPCError::ErrorResponse(
RPCResponseErrorCode::InvalidRequest,
format!(
"No context bytes provided for {:?} response",
versioned_protocol
),
)),
},
}
}

@@ -823,12 +771,12 @@ mod tests {
}
}

fn bbrange_request() -> OldBlocksByRangeRequest {
OldBlocksByRangeRequest {
start_slot: 0,
count: 10,
step: 1,
fn bbrange_request_v1() -> OldBlocksByRangeRequest {
OldBlocksByRangeRequest::new_v1(0, 10, 1)
}

fn bbrange_request_v2() -> OldBlocksByRangeRequest {
OldBlocksByRangeRequest::new(0, 10, 1)
}

fn blbrange_request() -> BlobsByRangeRequest {
@@ -838,10 +786,12 @@ mod tests {
}
}

fn bbroot_request() -> BlocksByRootRequest {
BlocksByRootRequest {
block_roots: VariableList::from(vec![Hash256::zero()]),
fn bbroot_request_v1() -> BlocksByRootRequest {
BlocksByRootRequest::new_v1(vec![Hash256::zero()].into())
}

fn bbroot_request_v2() -> BlocksByRootRequest {
BlocksByRootRequest::new(vec![Hash256::zero()].into())
}

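Both constructor flavours are exercised by the encode/decode tests below; usage is symmetric (brief sketch):

let v1 = BlocksByRootRequest::new_v1(vec![Hash256::zero()].into());
let v2 = BlocksByRootRequest::new(vec![Hash256::zero()].into()); // V2 is the default
assert!(matches!(v1, BlocksByRootRequest::V1(_)));
assert!(matches!(v2, BlocksByRootRequest::V2(_)));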
fn blbroot_request() -> BlobsByRootRequest {
@@ -874,12 +824,11 @@ mod tests {

/// Encodes the given protocol response as bytes.
fn encode_response(
protocol: Protocol,
version: Version,
protocol: SupportedProtocol,
message: RPCCodedResponse<Spec>,
fork_name: ForkName,
) -> Result<BytesMut, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy);
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context);

@@ -921,12 +870,11 @@ mod tests {

/// Attempts to decode the given protocol bytes as an rpc response
fn decode_response(
protocol: Protocol,
version: Version,
protocol: SupportedProtocol,
message: &mut BytesMut,
fork_name: ForkName,
) -> Result<Option<RPCResponse<Spec>>, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy);
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context);
let mut snappy_outbound_codec =
@@ -937,20 +885,19 @@ mod tests {

/// Encodes the provided protocol message as bytes and tries to decode the encoding bytes.
fn encode_then_decode_response(
protocol: Protocol,
version: Version,
protocol: SupportedProtocol,
message: RPCCodedResponse<Spec>,
fork_name: ForkName,
) -> Result<Option<RPCResponse<Spec>>, RPCError> {
let mut encoded = encode_response(protocol, version.clone(), message, fork_name)?;
decode_response(protocol, version, &mut encoded, fork_name)
let mut encoded = encode_response(protocol, message, fork_name)?;
decode_response(protocol, &mut encoded, fork_name)
}

/// Verifies that requests we send are encoded in a way that we would correctly decode too.
fn encode_then_decode_request(req: OutboundRequest<Spec>, fork_name: ForkName) {
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context);
for protocol in req.supported_protocols() {
let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy);
// Encode a request we send
let mut buf = BytesMut::new();
let mut outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
@@ -960,11 +907,8 @@ mod tests {
);
outbound_codec.encode(req.clone(), &mut buf).unwrap();

let mut inbound_codec = SSZSnappyInboundCodec::<Spec>::new(
protocol.clone(),
max_packet_size,
fork_context.clone(),
);
let mut inbound_codec =
SSZSnappyInboundCodec::<Spec>::new(protocol.clone(), max_packet_size, fork_context);

let decoded = inbound_codec.decode(&mut buf).unwrap().unwrap_or_else(|| {
panic!(
@@ -972,7 +916,7 @@ mod tests {
req, protocol, fork_name
)
});
match req.clone() {
match req {
OutboundRequest::Status(status) => {
assert_eq!(decoded, InboundRequest::Status(status))
}
@@ -997,10 +941,6 @@ mod tests {
OutboundRequest::MetaData(metadata) => {
assert_eq!(decoded, InboundRequest::MetaData(metadata))
}
OutboundRequest::LightClientBootstrap(bootstrap) => {
assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap))
}
}
}
}

@@ -1009,8 +949,7 @@ mod tests {
fn test_encode_then_decode_v1() {
assert_eq!(
encode_then_decode_response(
Protocol::Status,
Version::V1,
SupportedProtocol::StatusV1,
RPCCodedResponse::Success(RPCResponse::Status(status_message())),
ForkName::Base,
),
@@ -1019,8 +958,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::Ping,
Version::V1,
SupportedProtocol::PingV1,
RPCCodedResponse::Success(RPCResponse::Pong(ping_message())),
ForkName::Base,
),
@@ -1029,8 +967,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V1,
SupportedProtocol::BlocksByRangeV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Base,
),
@@ -1042,8 +979,7 @@ mod tests {
assert!(
matches!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V1,
SupportedProtocol::BlocksByRangeV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))),
ForkName::Altair,
)
@@ -1055,8 +991,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V1,
SupportedProtocol::BlocksByRootV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Base,
),
@@ -1068,8 +1003,7 @@ mod tests {
assert!(
matches!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V1,
SupportedProtocol::BlocksByRootV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
ForkName::Altair,
)
@@ -1081,18 +1015,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::MetaData,
Version::V1,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Base,
),
Ok(Some(RPCResponse::MetaData(metadata()))),
);

assert_eq!(
encode_then_decode_response(
Protocol::MetaData,
Version::V1,
SupportedProtocol::MetaDataV1,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Base,
),
@@ -1102,8 +1025,7 @@ mod tests {
// A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1
assert_eq!(
encode_then_decode_response(
Protocol::MetaData,
Version::V1,
SupportedProtocol::MetaDataV1,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
ForkName::Base,
),
@@ -1114,38 +1036,9 @@ mod tests {
// Test RPCResponse encoding/decoding for V2 messages
#[test]
fn test_encode_then_decode_v2() {
assert!(
matches!(
encode_then_decode_response(
Protocol::Status,
Version::V2,
RPCCodedResponse::Success(RPCResponse::Status(status_message())),
ForkName::Base,
)
.unwrap_err(),
RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
),
"status does not have V2 message"
);

assert!(
matches!(
encode_then_decode_response(
Protocol::Ping,
Version::V2,
RPCCodedResponse::Success(RPCResponse::Pong(ping_message())),
ForkName::Base,
)
.unwrap_err(),
RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
),
"ping does not have V2 message"
);

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V2,
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Base,
),
@@ -1159,8 +1052,7 @@ mod tests {
// the current_fork's rpc limit
assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V2,
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Altair,
),
@@ -1171,8 +1063,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V2,
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))),
ForkName::Altair,
),
@@ -1184,8 +1075,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRange,
Version::V2,
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(
merge_block_small.clone()
))),
@@ -1203,8 +1093,7 @@ mod tests {
assert!(
matches!(
decode_response(
Protocol::BlocksByRange,
Version::V2,
SupportedProtocol::BlocksByRangeV2,
&mut encoded,
ForkName::Merge,
)
@@ -1216,8 +1105,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V2,
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Base,
),
@@ -1231,8 +1119,7 @@ mod tests {
// the current_fork's rpc limit
assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V2,
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Altair,
),
@@ -1243,8 +1130,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V2,
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
ForkName::Altair,
),
@@ -1253,8 +1139,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::BlocksByRoot,
Version::V2,
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(
merge_block_small.clone()
))),
@@ -1270,8 +1155,7 @@ mod tests {
assert!(
matches!(
decode_response(
Protocol::BlocksByRoot,
Version::V2,
SupportedProtocol::BlocksByRootV2,
&mut encoded,
ForkName::Merge,
)
@@ -1284,8 +1168,7 @@ mod tests {
// A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2
assert_eq!(
encode_then_decode_response(
Protocol::MetaData,
Version::V2,
SupportedProtocol::MetaDataV2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Base,
),
@@ -1294,8 +1177,7 @@ mod tests {

assert_eq!(
encode_then_decode_response(
Protocol::MetaData,
Version::V2,
SupportedProtocol::MetaDataV2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
ForkName::Altair,
),
|
||||
|
||||
// Removing context bytes for v2 messages should error
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
|
||||
ForkName::Base,
|
||||
)
|
||||
@ -1321,8 +1202,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut encoded_bytes,
|
||||
ForkName::Base
|
||||
)
|
||||
@ -1331,8 +1211,7 @@ mod tests {
|
||||
));
|
||||
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRoot,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRootV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
|
||||
ForkName::Base,
|
||||
)
|
||||
@ -1342,8 +1221,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut encoded_bytes,
|
||||
ForkName::Base
|
||||
)
|
||||
@ -1353,8 +1231,7 @@ mod tests {
|
||||
|
||||
// Trying to decode a base block with altair context bytes should give ssz decoding error
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
|
||||
ForkName::Altair,
|
||||
)
|
||||
@ -1367,8 +1244,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut wrong_fork_bytes,
|
||||
ForkName::Altair
|
||||
)
|
||||
@ -1378,8 +1254,7 @@ mod tests {
|
||||
|
||||
// Trying to decode an altair block with base context bytes should give ssz decoding error
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRoot,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRootV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
|
||||
ForkName::Altair,
|
||||
)
|
||||
@ -1391,8 +1266,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut wrong_fork_bytes,
|
||||
ForkName::Altair
|
||||
)
|
||||
@ -1405,8 +1279,7 @@ mod tests {
|
||||
encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap());
|
||||
encoded_bytes.extend_from_slice(
|
||||
&encode_response(
|
||||
Protocol::MetaData,
|
||||
Version::V2,
|
||||
SupportedProtocol::MetaDataV2,
|
||||
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
|
||||
ForkName::Altair,
|
||||
)
|
||||
@ -1414,8 +1287,7 @@ mod tests {
|
||||
);
|
||||
|
||||
assert!(decode_response(
|
||||
Protocol::MetaData,
|
||||
Version::V2,
|
||||
SupportedProtocol::MetaDataV2,
|
||||
&mut encoded_bytes,
|
||||
ForkName::Altair
|
||||
)
|
||||
@ -1423,8 +1295,7 @@ mod tests {
|
||||
|
||||
// Sending context bytes which do not correspond to any fork should return an error
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRoot,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRootV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
|
||||
ForkName::Altair,
|
||||
)
|
||||
@ -1436,8 +1307,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut wrong_fork_bytes,
|
||||
ForkName::Altair
|
||||
)
|
||||
@ -1447,8 +1317,7 @@ mod tests {
|
||||
|
||||
// Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)`
|
||||
let mut encoded_bytes = encode_response(
|
||||
Protocol::BlocksByRoot,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRootV2,
|
||||
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
|
||||
ForkName::Altair,
|
||||
)
|
||||
@ -1458,8 +1327,7 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut part,
|
||||
ForkName::Altair
|
||||
),
|
||||
@ -1473,11 +1341,14 @@ mod tests {
|
||||
OutboundRequest::Ping(ping_message()),
|
||||
OutboundRequest::Status(status_message()),
|
||||
OutboundRequest::Goodbye(GoodbyeReason::Fault),
|
||||
OutboundRequest::BlocksByRange(bbrange_request()),
|
||||
OutboundRequest::BlocksByRoot(bbroot_request()),
|
||||
OutboundRequest::BlocksByRange(bbrange_request_v1()),
|
||||
OutboundRequest::BlocksByRange(bbrange_request_v2()),
|
||||
OutboundRequest::BlocksByRoot(bbroot_request_v1()),
|
||||
OutboundRequest::BlocksByRoot(bbroot_request_v2()),
|
||||
OutboundRequest::MetaData(MetadataRequest::new_v1()),
|
||||
OutboundRequest::BlobsByRange(blbrange_request()),
|
||||
OutboundRequest::BlobsByRoot(blbroot_request()),
|
||||
OutboundRequest::MetaData(PhantomData::<Spec>),
|
||||
OutboundRequest::MetaData(MetadataRequest::new_v2()),
|
||||
];
|
||||
for req in requests.iter() {
|
||||
for fork_name in ForkName::list_all() {
|
||||
@ -1537,7 +1408,7 @@ mod tests {
|
||||
|
||||
// 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
|
||||
assert!(matches!(
|
||||
decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(),
|
||||
decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(),
|
||||
RPCError::InvalidData(_)
|
||||
));
|
||||
}
|
||||
@ -1595,8 +1466,7 @@ mod tests {
|
||||
// 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
|
||||
assert!(matches!(
|
||||
decode_response(
|
||||
Protocol::BlocksByRange,
|
||||
Version::V2,
|
||||
SupportedProtocol::BlocksByRangeV2,
|
||||
&mut dst,
|
||||
ForkName::Altair
|
||||
)
|
||||
@ -1639,7 +1509,7 @@ mod tests {
|
||||
dst.extend_from_slice(writer.get_ref());
|
||||
|
||||
assert!(matches!(
|
||||
decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(),
|
||||
decode_response(SupportedProtocol::StatusV1, &mut dst, ForkName::Base).unwrap_err(),
|
||||
RPCError::InvalidData(_)
|
||||
));
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use super::protocol::{max_rpc_size, InboundRequest, Protocol, RPCError, RPCProto
|
||||
use super::{RPCReceived, RPCSend, ReqId};
|
||||
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
|
||||
use crate::rpc::protocol::InboundFramed;
|
||||
use crate::rpc::ResponseTermination;
|
||||
use fnv::FnvHashMap;
|
||||
use futures::prelude::*;
|
||||
use futures::{Sink, SinkExt};
|
||||
@ -245,7 +246,7 @@ where
|
||||
while let Some((id, req)) = self.dial_queue.pop() {
|
||||
self.events_out.push(Err(HandlerErr::Outbound {
|
||||
error: RPCError::Disconnected,
|
||||
proto: req.protocol(),
|
||||
proto: req.versioned_protocol().protocol(),
|
||||
id,
|
||||
}));
|
||||
}
|
||||
@ -269,7 +270,7 @@ where
|
||||
}
|
||||
_ => self.events_out.push(Err(HandlerErr::Outbound {
|
||||
error: RPCError::Disconnected,
|
||||
proto: req.protocol(),
|
||||
proto: req.versioned_protocol().protocol(),
|
||||
id,
|
||||
})),
|
||||
}
|
||||
@ -334,7 +335,7 @@ where
|
||||
) {
|
||||
self.dial_negotiated -= 1;
|
||||
let (id, request) = request_info;
|
||||
let proto = request.protocol();
|
||||
let proto = request.versioned_protocol().protocol();
|
||||
|
||||
// accept outbound connections only if the handler is not deactivated
|
||||
if matches!(self.state, HandlerState::Deactivated) {
|
||||
@ -414,7 +415,7 @@ where
|
||||
128,
|
||||
) as usize),
|
||||
delay_key: Some(delay_key),
|
||||
protocol: req.protocol(),
|
||||
protocol: req.versioned_protocol().protocol(),
|
||||
request_start_time: Instant::now(),
|
||||
remaining_chunks: expected_responses,
|
||||
},
|
||||
@ -422,7 +423,7 @@ where
|
||||
} else {
|
||||
self.events_out.push(Err(HandlerErr::Inbound {
|
||||
id: self.current_inbound_substream_id,
|
||||
proto: req.protocol(),
|
||||
proto: req.versioned_protocol().protocol(),
|
||||
error: RPCError::HandlerRejected,
|
||||
}));
|
||||
return self.shutdown(None);
|
||||
@ -498,7 +499,7 @@ where
|
||||
};
|
||||
self.events_out.push(Err(HandlerErr::Outbound {
|
||||
error,
|
||||
proto: req.protocol(),
|
||||
proto: req.versioned_protocol().protocol(),
|
||||
id,
|
||||
}));
|
||||
}
|
||||
@ -895,7 +896,7 @@ where
|
||||
// else we return an error, stream should not have closed early.
|
||||
let outbound_err = HandlerErr::Outbound {
|
||||
id: request_id,
|
||||
proto: request.protocol(),
|
||||
proto: request.versioned_protocol().protocol(),
|
||||
error: RPCError::IncompleteStream,
|
||||
};
|
||||
return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err)));
|
||||
@ -933,8 +934,13 @@ where
|
||||
// continue sending responses beyond what we would expect. Here
|
||||
// we simply terminate the stream and report a stream
|
||||
// termination to the application
|
||||
let termination = match protocol {
|
||||
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
|
||||
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
|
||||
_ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream.
|
||||
};
|
||||
|
||||
if let Some(termination) = protocol.terminator() {
|
||||
if let Some(termination) = termination {
|
||||
return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(
|
||||
RPCReceived::EndOfStream(request_id, termination),
|
||||
)));
|
||||
|
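The handler now asks the protocol itself for its stream terminator instead of matching inline. `Protocol::terminator` is defined in `protocol.rs`, outside this diff; a plausible shape, inferred from the old match plus the blob protocols, which also stream multiple responses:

impl Protocol {
    /// Protocols with multi-chunk responses report how their stream ends;
    /// single-response protocols return `None` and the stream is simply dropped.
    pub fn terminator(self) -> Option<ResponseTermination> {
        match self {
            Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
            Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
            // The blob arms are an assumption: they stream chunks the same way.
            Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange),
            Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot),
            _ => None,
        }
    }
}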
@@ -3,11 +3,13 @@
use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield};
use regex::bytes::Regex;
use serde::Serialize;
use ssz::Encode;
use ssz_derive::{Decode, Encode};
use ssz_types::{
typenum::{U1024, U256, U768},
VariableList,
};
use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::Arc;
use strum::IntoStaticStr;
@@ -96,6 +98,30 @@ pub struct Ping {
pub data: u64,
}

/// The METADATA request structure.
#[superstruct(
variants(V1, V2),
variant_attributes(derive(Clone, Debug, PartialEq, Serialize),)
)]
#[derive(Clone, Debug, PartialEq)]
pub struct MetadataRequest<T: EthSpec> {
_phantom_data: PhantomData<T>,
}

impl<T: EthSpec> MetadataRequest<T> {
pub fn new_v1() -> Self {
Self::V1(MetadataRequestV1 {
_phantom_data: PhantomData,
})
}

pub fn new_v2() -> Self {
Self::V2(MetadataRequestV2 {
_phantom_data: PhantomData,
})
}
}

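`MetadataRequest` carries no wire data at all; the superstruct variants exist purely so the requested version travels with the request at the type level. Usage sketch, with `Spec` standing in for a concrete `EthSpec`:

let req: MetadataRequest<Spec> = MetadataRequest::new_v2();
// The outbound API (see outbound.rs below) now takes the typed request
// where it previously took `PhantomData`:
let out = OutboundRequest::MetaData(req);
// `new_v1()` is only used when the peer negotiated the V1 metadata protocol.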
/// The METADATA response structure.
#[superstruct(
variants(V1, V2),
@@ -104,9 +130,8 @@ pub struct Ping {
serde(bound = "T: EthSpec", deny_unknown_fields),
)
)]
#[derive(Clone, Debug, PartialEq, Serialize, Encode)]
#[derive(Clone, Debug, PartialEq, Serialize)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub struct MetaData<T: EthSpec> {
/// A sequential counter indicating when data gets modified.
pub seq_number: u64,
@@ -117,6 +142,38 @@ pub struct MetaData<T: EthSpec> {
pub syncnets: EnrSyncCommitteeBitfield<T>,
}

impl<T: EthSpec> MetaData<T> {
/// Returns a V1 MetaData response from self.
pub fn metadata_v1(&self) -> Self {
match self {
md @ MetaData::V1(_) => md.clone(),
MetaData::V2(metadata) => MetaData::V1(MetaDataV1 {
seq_number: metadata.seq_number,
attnets: metadata.attnets.clone(),
}),
}
}

/// Returns a V2 MetaData response from self by filling unavailable fields with default.
pub fn metadata_v2(&self) -> Self {
match self {
MetaData::V1(metadata) => MetaData::V2(MetaDataV2 {
seq_number: metadata.seq_number,
attnets: metadata.attnets.clone(),
syncnets: Default::default(),
}),
md @ MetaData::V2(_) => md.clone(),
}
}

pub fn as_ssz_bytes(&self) -> Vec<u8> {
match self {
MetaData::V1(md) => md.as_ssz_bytes(),
MetaData::V2(md) => md.as_ssz_bytes(),
}
}
}

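These converters are what the inbound codec calls when encoding a METADATA response (see ssz_snappy.rs above): the node keeps one canonical `MetaData` and down- or up-converts to whatever version the peer negotiated. Sketch:

let bytes = match versioned_protocol {
    // /metadata/1: drop the syncnets field.
    SupportedProtocol::MetaDataV1 => meta.metadata_v1().as_ssz_bytes(),
    // /metadata/2: fill syncnets with a default if we only hold a V1.
    SupportedProtocol::MetaDataV2 => meta.metadata_v2().as_ssz_bytes(),
    _ => unreachable!("metadata responses only follow metadata requests"),
};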
/// The reason given for a `Goodbye` message.
///
/// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`,
@@ -208,7 +265,11 @@ impl ssz::Decode for GoodbyeReason {
}

/// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
#[superstruct(
variants(V1, V2),
variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
)]
#[derive(Clone, Debug, PartialEq)]
pub struct BlocksByRangeRequest {
/// The starting slot to request blocks.
pub start_slot: u64,
@@ -217,6 +278,17 @@ pub struct BlocksByRangeRequest {
pub count: u64,
}

impl BlocksByRangeRequest {
/// The default request is V2
pub fn new(start_slot: u64, count: u64) -> Self {
Self::V2(BlocksByRangeRequestV2 { start_slot, count })
}

pub fn new_v1(start_slot: u64, count: u64) -> Self {
Self::V1(BlocksByRangeRequestV1 { start_slot, count })
}
}

/// Request a number of beacon blobs from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlobsByRangeRequest {
@@ -228,7 +300,11 @@ pub struct BlobsByRangeRequest {
}

/// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
#[superstruct(
variants(V1, V2),
variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
)]
#[derive(Clone, Debug, PartialEq)]
pub struct OldBlocksByRangeRequest {
/// The starting slot to request blocks.
pub start_slot: u64,
@@ -244,13 +320,43 @@ pub struct OldBlocksByRangeRequest {
pub step: u64,
}

impl OldBlocksByRangeRequest {
/// The default request is V2
pub fn new(start_slot: u64, count: u64, step: u64) -> Self {
Self::V2(OldBlocksByRangeRequestV2 {
start_slot,
count,
step,
})
}

pub fn new_v1(start_slot: u64, count: u64, step: u64) -> Self {
Self::V1(OldBlocksByRangeRequestV1 {
start_slot,
count,
step,
})
}
}

/// Request a number of beacon block bodies from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
#[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))]
#[derive(Clone, Debug, PartialEq)]
pub struct BlocksByRootRequest {
/// The list of beacon block bodies being requested.
pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
}

impl BlocksByRootRequest {
pub fn new(block_roots: VariableList<Hash256, MaxRequestBlocks>) -> Self {
Self::V2(BlocksByRootRequestV2 { block_roots })
}

pub fn new_v1(block_roots: VariableList<Hash256, MaxRequestBlocks>) -> Self {
Self::V1(BlocksByRootRequestV1 { block_roots })
}
}

/// Request a number of beacon blocks and blobs from a peer.
#[derive(Clone, Debug, PartialEq)]
pub struct BlobsByRootRequest {
@@ -492,7 +598,12 @@ impl std::fmt::Display for GoodbyeReason {

impl std::fmt::Display for BlocksByRangeRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count)
write!(
f,
"Start Slot: {}, Count: {}",
self.start_slot(),
self.count()
)
}
}

@@ -501,7 +612,9 @@ impl std::fmt::Display for OldBlocksByRangeRequest {
write!(
f,
"Start Slot: {}, Count: {}, Step: {}",
self.start_slot, self.count, self.step
self.start_slot(),
self.count(),
self.step()
)
}
}

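The Display impls above can call `self.start_slot()` and friends on the enum because `superstruct` generates a reference getter for every field shared by all variants; hand-written, the generated code is roughly:

impl OldBlocksByRangeRequest {
    // Equivalent of the superstruct-generated getter (sketch).
    pub fn start_slot(&self) -> &u64 {
        match self {
            Self::V1(req) => &req.start_slot,
            Self::V2(req) => &req.start_slot,
        }
    }
    // ...and likewise for count() and step().
}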
@@ -247,7 +247,7 @@ where
}
Err(RateLimitedErr::TooLarge) => {
// we set the batch sizes, so this is a coding/config err for most protocols
let protocol = req.protocol();
let protocol = req.versioned_protocol().protocol();
if matches!(protocol, Protocol::BlocksByRange) {
debug!(self.log, "Blocks by range request will never be processed"; "request" => %req);
} else {
@@ -335,7 +335,7 @@ where
serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?;
let (msg_kind, protocol) = match &self.event {
Ok(received) => match received {
RPCReceived::Request(_, req) => ("request", req.protocol()),
RPCReceived::Request(_, req) => ("request", req.versioned_protocol().protocol()),
RPCReceived::Response(_, res) => ("response", res.protocol()),
RPCReceived::EndOfStream(_, end) => (
"end_of_stream",

@@ -1,11 +1,8 @@
use std::marker::PhantomData;

use super::methods::*;
use super::protocol::Protocol;
use super::protocol::ProtocolId;
use super::protocol::SupportedProtocol;
use super::RPCError;
use crate::rpc::protocol::Encoding;
use crate::rpc::protocol::Version;
use crate::rpc::{
codec::{base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec},
methods::ResponseTermination,
@@ -40,9 +37,8 @@ pub enum OutboundRequest<TSpec: EthSpec> {
BlocksByRoot(BlocksByRootRequest),
BlobsByRange(BlobsByRangeRequest),
BlobsByRoot(BlobsByRootRequest),
LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
MetaData(MetadataRequest<TSpec>),
}

impl<TSpec: EthSpec> UpgradeInfo for OutboundRequestContainer<TSpec> {
@@ -61,46 +57,37 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
match self {
// add more protocols when versions/encodings are supported
OutboundRequest::Status(_) => vec![ProtocolId::new(
Protocol::Status,
Version::V1,
SupportedProtocol::StatusV1,
Encoding::SSZSnappy,
)],
OutboundRequest::Goodbye(_) => vec![ProtocolId::new(
Protocol::Goodbye,
Version::V1,
SupportedProtocol::GoodbyeV1,
Encoding::SSZSnappy,
)],
OutboundRequest::BlocksByRange(_) => vec![
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy),
],
OutboundRequest::BlocksByRoot(_) => vec![
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy),
],
OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
Protocol::BlobsByRange,
Version::V1,
SupportedProtocol::BlobsByRangeV1,
Encoding::SSZSnappy,
)],
OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new(
Protocol::BlobsByRoot,
Version::V1,
SupportedProtocol::BlobsByRootV1,
Encoding::SSZSnappy,
)],
OutboundRequest::Ping(_) => vec![ProtocolId::new(
Protocol::Ping,
Version::V1,
SupportedProtocol::PingV1,
Encoding::SSZSnappy,
)],
OutboundRequest::MetaData(_) => vec![
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy),
],
// Note: This match arm is technically unreachable as we only respond to light client requests
// that we generate from the beacon state.
// We do not make light client rpc requests from the beacon node
OutboundRequest::LightClientBootstrap(_) => vec![],
}
}
/* These functions are used in the handler for stream management */
@@ -110,28 +97,35 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
match self {
OutboundRequest::Status(_) => 1,
OutboundRequest::Goodbye(_) => 0,
OutboundRequest::BlocksByRange(req) => req.count,
OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
OutboundRequest::BlocksByRange(req) => *req.count(),
OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
OutboundRequest::BlobsByRange(req) => req.count * TSpec::max_blobs_per_block() as u64,
OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
OutboundRequest::Ping(_) => 1,
OutboundRequest::MetaData(_) => 1,
OutboundRequest::LightClientBootstrap(_) => 1,
}
}

/// Gives the corresponding `Protocol` to this request.
pub fn protocol(&self) -> Protocol {
/// Gives the corresponding `SupportedProtocol` to this request.
pub fn versioned_protocol(&self) -> SupportedProtocol {
match self {
OutboundRequest::Status(_) => Protocol::Status,
OutboundRequest::Goodbye(_) => Protocol::Goodbye,
OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
OutboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
OutboundRequest::Ping(_) => Protocol::Ping,
OutboundRequest::MetaData(_) => Protocol::MetaData,
OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
OutboundRequest::Status(_) => SupportedProtocol::StatusV1,
OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1,
OutboundRequest::BlocksByRange(req) => match req {
OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1,
OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2,
},
OutboundRequest::BlocksByRoot(req) => match req {
BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
},
OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
OutboundRequest::Ping(_) => SupportedProtocol::PingV1,
OutboundRequest::MetaData(req) => match req {
MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2,
},
}
}

@@ -145,7 +139,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
OutboundRequest::LightClientBootstrap(_) => unreachable!(),
OutboundRequest::Status(_) => unreachable!(),
OutboundRequest::Goodbye(_) => unreachable!(),
OutboundRequest::Ping(_) => unreachable!(),
@@ -205,9 +198,6 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
OutboundRequest::LightClientBootstrap(bootstrap) => {
write!(f, "Lightclient Bootstrap: {}", bootstrap.root)
}
}
}
}

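A minimal sketch of the pattern this file converges on, assuming the imports of the surrounding module: rate limiting and metrics stay keyed on the unversioned `Protocol`, so call sites that previously called `protocol()` now chain the two accessors introduced here.

```rust
// Hypothetical helper mirroring the call sites in this diff.
fn protocol_for_limiter<TSpec: EthSpec>(req: &OutboundRequest<TSpec>) -> Protocol {
    // `versioned_protocol()` identifies both the message and its version;
    // `.protocol()` collapses it back to the message kind alone.
    req.versioned_protocol().protocol()
}
```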
@@ -205,35 +205,80 @@ pub enum Protocol {
LightClientBootstrap,
}

/// RPC Versions
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Version {
/// Version 1 of RPC
V1,
/// Version 2 of RPC
V2,
}

/// RPC Encodings supported.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Encoding {
SSZSnappy,
}

impl Protocol {
pub(crate) fn terminator(self) -> Option<ResponseTermination> {
/// All valid protocol name and version combinations.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SupportedProtocol {
StatusV1,
GoodbyeV1,
BlocksByRangeV1,
BlocksByRangeV2,
BlocksByRootV1,
BlocksByRootV2,
BlobsByRangeV1,
BlobsByRootV1,
PingV1,
MetaDataV1,
MetaDataV2,
LightClientBootstrapV1,
}

impl SupportedProtocol {
pub fn version_string(&self) -> &'static str {
match self {
Protocol::Status => None,
Protocol::Goodbye => None,
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange),
Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot),
Protocol::Ping => None,
Protocol::MetaData => None,
Protocol::LightClientBootstrap => None,
SupportedProtocol::StatusV1 => "1",
SupportedProtocol::GoodbyeV1 => "1",
SupportedProtocol::BlocksByRangeV1 => "1",
SupportedProtocol::BlocksByRangeV2 => "2",
SupportedProtocol::BlocksByRootV1 => "1",
SupportedProtocol::BlocksByRootV2 => "2",
SupportedProtocol::BlobsByRangeV1 => "1",
SupportedProtocol::BlobsByRootV1 => "1",
SupportedProtocol::PingV1 => "1",
SupportedProtocol::MetaDataV1 => "1",
SupportedProtocol::MetaDataV2 => "2",
SupportedProtocol::LightClientBootstrapV1 => "1",
}
}

pub fn protocol(&self) -> Protocol {
match self {
SupportedProtocol::StatusV1 => Protocol::Status,
SupportedProtocol::GoodbyeV1 => Protocol::Goodbye,
SupportedProtocol::BlocksByRangeV1 => Protocol::BlocksByRange,
SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange,
SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot,
SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot,
SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange,
SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot,
SupportedProtocol::PingV1 => Protocol::Ping,
SupportedProtocol::MetaDataV1 => Protocol::MetaData,
SupportedProtocol::MetaDataV2 => Protocol::MetaData,
SupportedProtocol::LightClientBootstrapV1 => Protocol::LightClientBootstrap,
}
}

fn currently_supported() -> Vec<ProtocolId> {
vec![
ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy),
ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy),
// V2 variants have higher preference than V1
ProtocolId::new(Self::BlocksByRangeV2, Encoding::SSZSnappy),
ProtocolId::new(Self::BlocksByRangeV1, Encoding::SSZSnappy),
ProtocolId::new(Self::BlocksByRootV2, Encoding::SSZSnappy),
ProtocolId::new(Self::BlocksByRootV1, Encoding::SSZSnappy),
ProtocolId::new(Self::BlobsByRangeV1, Encoding::SSZSnappy),
ProtocolId::new(Self::BlobsByRootV1, Encoding::SSZSnappy),
ProtocolId::new(Self::PingV1, Encoding::SSZSnappy),
ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy),
ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy),
]
}
}

impl std::fmt::Display for Encoding {
@@ -245,16 +290,6 @@ impl std::fmt::Display for Encoding {
}
}

impl std::fmt::Display for Version {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let repr = match self {
Version::V1 => "1",
Version::V2 => "2",
};
f.write_str(repr)
}
}

#[derive(Debug, Clone)]
pub struct RPCProtocol<TSpec: EthSpec> {
pub fork_context: Arc<ForkContext>,
@@ -269,30 +304,17 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {

/// The list of supported RPC protocols for Lighthouse.
fn protocol_info(&self) -> Self::InfoIter {
let mut supported_protocols = vec![
ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
// V2 variants have higher preference than V1
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
];
let mut supported_protocols = SupportedProtocol::currently_supported();

if let ForkName::Deneb = self.fork_context.current_fork() {
supported_protocols.extend_from_slice(&[
ProtocolId::new(Protocol::BlobsByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy),
]);
}

if self.enable_light_client_server {
supported_protocols.push(ProtocolId::new(
Protocol::LightClientBootstrap,
Version::V1,
SupportedProtocol::LightClientBootstrapV1,
Encoding::SSZSnappy,
));
}
@@ -322,11 +344,8 @@ impl RpcLimits {
/// Tracks the types in a protocol id.
#[derive(Clone, Debug)]
pub struct ProtocolId {
/// The RPC message type/name.
pub message_name: Protocol,

/// The version of the RPC.
pub version: Version,
/// The protocol name and version
pub versioned_protocol: SupportedProtocol,

/// The encoding of the RPC.
pub encoding: Encoding,
@@ -338,7 +357,7 @@ pub struct ProtocolId {
impl ProtocolId {
/// Returns min and max size for messages of given protocol id requests.
pub fn rpc_request_limits(&self) -> RpcLimits {
match self.message_name {
match self.versioned_protocol.protocol() {
Protocol::Status => RpcLimits::new(
<StatusMessage as Encode>::ssz_fixed_len(),
<StatusMessage as Encode>::ssz_fixed_len(),
@@ -347,9 +366,10 @@ impl ProtocolId {
<GoodbyeReason as Encode>::ssz_fixed_len(),
<GoodbyeReason as Encode>::ssz_fixed_len(),
),
// V1 and V2 requests are the same
Protocol::BlocksByRange => RpcLimits::new(
<OldBlocksByRangeRequest as Encode>::ssz_fixed_len(),
<OldBlocksByRangeRequest as Encode>::ssz_fixed_len(),
<OldBlocksByRangeRequestV2 as Encode>::ssz_fixed_len(),
<OldBlocksByRangeRequestV2 as Encode>::ssz_fixed_len(),
),
Protocol::BlocksByRoot => {
RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
@@ -376,7 +396,7 @@ impl ProtocolId {

/// Returns min and max size for messages of given protocol id responses.
pub fn rpc_response_limits<T: EthSpec>(&self, fork_context: &ForkContext) -> RpcLimits {
match self.message_name {
match self.versioned_protocol.protocol() {
Protocol::Status => RpcLimits::new(
<StatusMessage as Encode>::ssz_fixed_len(),
<StatusMessage as Encode>::ssz_fixed_len(),
@@ -407,31 +427,36 @@ impl ProtocolId {
/// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
/// beginning of the stream, else returns `false`.
pub fn has_context_bytes(&self) -> bool {
match self.message_name {
Protocol::BlocksByRange | Protocol::BlocksByRoot => match self.version {
Version::V2 => true,
Version::V1 => false,
},
Protocol::LightClientBootstrap => match self.version {
Version::V2 | Version::V1 => true,
},
Protocol::BlobsByRoot | Protocol::BlobsByRange => true,
Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false,
match self.versioned_protocol {
SupportedProtocol::BlocksByRangeV2
| SupportedProtocol::BlocksByRootV2
| SupportedProtocol::BlobsByRangeV1
| SupportedProtocol::BlobsByRootV1
| SupportedProtocol::LightClientBootstrapV1 => true,
SupportedProtocol::StatusV1
| SupportedProtocol::BlocksByRootV1
| SupportedProtocol::BlocksByRangeV1
| SupportedProtocol::PingV1
| SupportedProtocol::MetaDataV1
| SupportedProtocol::MetaDataV2
| SupportedProtocol::GoodbyeV1 => false,
}
}
}

/// An RPC protocol ID.
impl ProtocolId {
pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self {
pub fn new(versioned_protocol: SupportedProtocol, encoding: Encoding) -> Self {
let protocol_id = format!(
"{}/{}/{}/{}",
PROTOCOL_PREFIX, message_name, version, encoding
PROTOCOL_PREFIX,
versioned_protocol.protocol(),
versioned_protocol.version_string(),
encoding
);

ProtocolId {
message_name,
version,
versioned_protocol,
encoding,
protocol_id,
}
@@ -464,7 +489,7 @@ where

fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future {
async move {
let protocol_name = protocol.message_name;
let versioned_protocol = protocol.versioned_protocol;
// convert the socket to tokio compatible socket
let socket = socket.compat();
let codec = match protocol.encoding {
@@ -483,8 +508,13 @@ where
let socket = Framed::new(Box::pin(timed_socket), codec);

// MetaData requests should be empty, return the stream
match protocol_name {
Protocol::MetaData => Ok((InboundRequest::MetaData(PhantomData), socket)),
match versioned_protocol {
SupportedProtocol::MetaDataV1 => {
Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), socket))
}
SupportedProtocol::MetaDataV2 => {
Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket))
}
_ => {
match tokio::time::timeout(
Duration::from_secs(REQUEST_TIMEOUT),
@@ -514,7 +544,7 @@ pub enum InboundRequest<TSpec: EthSpec> {
BlobsByRoot(BlobsByRootRequest),
LightClientBootstrap(LightClientBootstrapRequest),
Ping(Ping),
MetaData(PhantomData<TSpec>),
MetaData(MetadataRequest<TSpec>),
}

/// Implements the encoding per supported protocol for `RPCRequest`.
@@ -526,8 +556,8 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
match self {
InboundRequest::Status(_) => 1,
InboundRequest::Goodbye(_) => 0,
InboundRequest::BlocksByRange(req) => req.count,
InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
InboundRequest::BlocksByRange(req) => *req.count(),
InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
InboundRequest::BlobsByRange(req) => req.count * TSpec::max_blobs_per_block() as u64,
InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
InboundRequest::Ping(_) => 1,
@@ -536,18 +566,27 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
}
}

/// Gives the corresponding `Protocol` to this request.
pub fn protocol(&self) -> Protocol {
/// Gives the corresponding `SupportedProtocol` to this request.
pub fn versioned_protocol(&self) -> SupportedProtocol {
match self {
InboundRequest::Status(_) => Protocol::Status,
InboundRequest::Goodbye(_) => Protocol::Goodbye,
InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
InboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
InboundRequest::Ping(_) => Protocol::Ping,
InboundRequest::MetaData(_) => Protocol::MetaData,
InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
InboundRequest::Status(_) => SupportedProtocol::StatusV1,
InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1,
InboundRequest::BlocksByRange(req) => match req {
OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1,
OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2,
},
InboundRequest::BlocksByRoot(req) => match req {
BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
},
InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
InboundRequest::Ping(_) => SupportedProtocol::PingV1,
InboundRequest::MetaData(req) => match req {
MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2,
},
InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1,
}
}

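A sketch of the net effect of the `ProtocolId` change: the wire-level protocol string is now derived from a single `SupportedProtocol` value. The `/eth2/beacon_chain/req` prefix and the `beacon_blocks_by_range` protocol name below are assumptions (neither `PROTOCOL_PREFIX` nor the `Display` impl for `Protocol` is shown in this diff):

```rust
// Illustrative: the V2 by-range protocol id, built from one enum value.
let id = ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy);

// The identifier is formatted as "{PREFIX}/{protocol}/{version}/{encoding}",
// e.g. something like "/eth2/beacon_chain/req/beacon_blocks_by_range/2/ssz_snappy".
// V2 by-range responses are fork-versioned, so context bytes are expected.
assert!(id.has_context_bytes());
```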
@@ -214,7 +214,7 @@ pub trait RateLimiterItem {

impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> {
fn protocol(&self) -> Protocol {
self.protocol()
self.versioned_protocol().protocol()
}

fn expected_responses(&self) -> u64 {
@@ -224,7 +224,7 @@ impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> {

impl<T: EthSpec> RateLimiterItem for super::OutboundRequest<T> {
fn protocol(&self) -> Protocol {
self.protocol()
self.versioned_protocol().protocol()
}

fn expected_responses(&self) -> u64 {

@@ -72,7 +72,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
request_id: Id,
req: OutboundRequest<TSpec>,
) -> Result<BehaviourAction<Id, TSpec>, Error> {
let protocol = req.protocol();
let protocol = req.versioned_protocol().protocol();
// First check that there are not already other requests waiting to be sent.
if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) {
queued_requests.push_back(QueuedRequest { req, request_id });
@@ -111,7 +111,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
event: RPCSend::Request(request_id, req),
}),
Err(e) => {
let protocol = req.protocol();
let protocol = req.versioned_protocol();
match e {
RateLimitedErr::TooLarge => {
// this should never happen with default parameters. Let's just send the request.
@@ -119,7 +119,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
crit!(
log,
"Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters.";
"protocol" => %req.protocol()
"protocol" => %req.versioned_protocol().protocol()
);
Ok(BehaviourAction::NotifyHandler {
peer_id,
@@ -128,7 +128,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
})
}
RateLimitedErr::TooSoon(wait_time) => {
debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id);
debug!(log, "Self rate limiting"; "protocol" => %protocol.protocol(), "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id);
Err((QueuedRequest { req, request_id }, wait_time))
}
}

@@ -8,7 +8,8 @@ use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
use crate::rpc::{
methods::{
BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2,
RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
},
OutboundRequest, SubstreamId,
};
@@ -48,15 +49,26 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
fn from(req: Request) -> OutboundRequest<TSpec> {
match req {
Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r),
Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => {
OutboundRequest::BlocksByRange(OldBlocksByRangeRequest {
start_slot,
count,
Request::BlocksByRange(r) => match r {
BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange(
OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 {
start_slot: req.start_slot,
count: req.count,
step: 1,
})
}),
),
BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange(
OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 {
start_slot: req.start_slot,
count: req.count,
step: 1,
}),
),
},
Request::LightClientBootstrap(_) => {
unreachable!("Lighthouse never makes an outbound light client request")
}
Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r),
Request::Status(s) => OutboundRequest::Status(s),
}

@@ -9,6 +9,7 @@ use crate::peer_manager::{
ConnectionDirection, PeerManager, PeerManagerEvent,
};
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
use crate::rpc::methods::MetadataRequest;
use crate::rpc::*;
use crate::service::behaviour::BehaviourEvent;
pub use crate::service::behaviour::Gossipsub;
@@ -37,7 +38,6 @@ use slog::{crit, debug, info, o, trace, warn};
use std::path::PathBuf;
use std::pin::Pin;
use std::{
marker::PhantomData,
sync::Arc,
task::{Context, Poll},
};
@@ -949,16 +949,25 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {

/// Sends a METADATA request to a peer.
fn send_meta_data_request(&mut self, peer_id: PeerId) {
let event = OutboundRequest::MetaData(PhantomData);
// We always prefer sending V2 requests
let event = OutboundRequest::MetaData(MetadataRequest::new_v2());
self.eth2_rpc_mut()
.send_request(peer_id, RequestId::Internal, event);
}

/// Sends a METADATA response to a peer.
fn send_meta_data_response(&mut self, id: PeerRequestId, peer_id: PeerId) {
let event = RPCCodedResponse::Success(RPCResponse::MetaData(
self.network_globals.local_metadata.read().clone(),
));
fn send_meta_data_response(
&mut self,
req: MetadataRequest<TSpec>,
id: PeerRequestId,
peer_id: PeerId,
) {
let metadata = self.network_globals.local_metadata.read().clone();
let metadata = match req {
MetadataRequest::V1(_) => metadata.metadata_v1(),
MetadataRequest::V2(_) => metadata,
};
let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata));
self.eth2_rpc_mut().send_response(peer_id, id, event);
}

@@ -1207,9 +1216,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
self.pong(peer_request_id, peer_id);
None
}
InboundRequest::MetaData(_) => {
InboundRequest::MetaData(req) => {
// send the requested meta-data
self.send_meta_data_response((handler_id, id), peer_id);
self.send_meta_data_response(req, (handler_id, id), peer_id);
None
}
InboundRequest::Goodbye(reason) => {
@@ -1236,13 +1245,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Some(event)
}
InboundRequest::BlocksByRange(req) => {
let methods::OldBlocksByRangeRequest {
start_slot,
mut count,
step,
} = req;
// Still disconnect the peer if the request is naughty.
if step == 0 {
let mut count = *req.count();
if *req.step() == 0 {
self.peer_manager_mut().handle_rpc_error(
&peer_id,
Protocol::BlocksByRange,
@@ -1254,14 +1259,18 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
return None;
}
// return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856
if step > 1 {
if *req.step() > 1 {
count = 1;
}
let event = self.build_request(
peer_request_id,
peer_id,
Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }),
);
let request = match req {
methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange(
BlocksByRangeRequest::new_v1(req.start_slot, count),
),
methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange(
BlocksByRangeRequest::new(req.start_slot, count),
),
};
let event = self.build_request(peer_request_id, peer_id, request);
Some(event)
}
InboundRequest::BlocksByRoot(req) => {

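A condensed sketch of the version-aware metadata flow introduced above: requests always go out as V2, while the responder mirrors the version the peer asked for, downgrading via `metadata_v1()` when needed (assumed here to strip the V2-only fields, e.g. syncnets):

```rust
// Minimal sketch, assuming `MetaData<TSpec>` exposes `metadata_v1()` as used
// in `send_meta_data_response` above.
fn response_metadata<TSpec: EthSpec>(
    local: MetaData<TSpec>,
    req: &MetadataRequest<TSpec>,
) -> MetaData<TSpec> {
    match req {
        // A V1 requester cannot decode V2-only fields, so downgrade first.
        MetadataRequest::V1(_) => local.metadata_v1(),
        MetadataRequest::V2(_) => local,
    }
}
```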
@@ -276,9 +276,11 @@ pub(crate) fn save_metadata_to_disk<E: EthSpec>(
log: &slog::Logger,
) {
let _ = std::fs::create_dir_all(dir);
match File::create(dir.join(METADATA_FILENAME))
.and_then(|mut f| f.write_all(&metadata.as_ssz_bytes()))
{
let metadata_bytes = match metadata {
MetaData::V1(md) => md.as_ssz_bytes(),
MetaData::V2(md) => md.as_ssz_bytes(),
};
match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) {
Ok(_) => {
debug!(log, "Metadata written to disk");
}

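Design note on the hunk above: `MetaData` is now an enum over V1/V2 and is not SSZ-encoded as a union; serialization dispatches on the concrete variant, as the new match makes explicit. A minimal sketch of the pattern, assuming both variant structs implement `Encode`:

```rust
// Encode whichever variant we hold; each variant is a plain SSZ container.
let metadata_bytes = match metadata {
    MetaData::V1(md) => md.as_ssz_bytes(),
    MetaData::V2(md) => md.as_ssz_bytes(),
};
```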
@@ -155,10 +155,7 @@ fn test_blocks_by_range_chunked_rpc() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

// BlocksByRange Request
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0,
count: messages_to_send,
});
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

let spec = E::default_spec();

@@ -282,10 +279,7 @@ fn test_blocks_by_range_over_limit() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

// BlocksByRange Request
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0,
count: messages_to_send,
});
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

// BlocksByRange Response
let full_block = merge_block_large(&common::fork_context(ForkName::Merge));
@@ -367,10 +361,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

// BlocksByRange Request
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0,
count: messages_to_send,
});
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

// BlocksByRange Response
let spec = E::default_spec();
@@ -490,10 +481,7 @@ fn test_blocks_by_range_single_empty_rpc() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

// BlocksByRange Request
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0,
count: 10,
});
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10));

// BlocksByRange Response
let spec = E::default_spec();
@@ -594,16 +582,15 @@ fn test_blocks_by_root_chunked_rpc() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

// BlocksByRoot Request
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from(vec![
let rpc_request =
Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
]),
});
])));

// BlocksByRoot Response
let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));
@@ -722,8 +709,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

// BlocksByRoot Request
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from(vec![
let rpc_request =
Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
@@ -734,8 +721,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
]),
});
])));

// BlocksByRoot Response
let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));

@@ -9,8 +9,8 @@ use beacon_chain::{
observed_operations::ObservationOutcome,
sync_committee_verification::{self, Error as SyncCommitteeError},
validator_monitor::get_block_delay_ms,
AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized,
ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer,
AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError,
GossipVerifiedBlock, NotifyExecutionLayer,
};
use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
use operation_pool::ReceivedPreCapella;
@@ -756,11 +756,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let blob_root = verified_blob.block_root();
let blob_slot = verified_blob.slot();
let blob_clone = verified_blob.clone().to_blob();
match self
.chain
.process_blob(verified_blob, CountUnrealized::True)
.await
{
match self.chain.process_blob(verified_blob).await {
Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
//TODO(sean) add metrics and logging
self.chain.recompute_head_at_current_slot().await;
@@ -978,7 +974,6 @@ impl<T: BeaconChainTypes> Worker<T> {
| Err(e @ BlockError::NonLinearParentRoots)
| Err(e @ BlockError::BlockIsNotLaterThanParent { .. })
| Err(e @ BlockError::InvalidSignature)
| Err(e @ BlockError::TooManySkippedSlots { .. })
| Err(e @ BlockError::WeakSubjectivityConflict)
| Err(e @ BlockError::InconsistentFork(_))
| Err(e @ BlockError::ExecutionPayloadError(_))
@@ -1103,12 +1098,7 @@ impl<T: BeaconChainTypes> Worker<T> {

let result = self
.chain
.process_block(
block_root,
verified_block,
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.process_block(block_root, verified_block, NotifyExecutionLayer::Yes)
.await;

match &result {

@@ -139,10 +139,10 @@ impl<T: BeaconChainTypes> Worker<T> {
request_id: PeerRequestId,
request: BlocksByRootRequest,
) {
let requested_blocks = request.block_roots.len();
let requested_blocks = request.block_roots().len();
let mut block_stream = match self
.chain
.get_blocks_checking_early_attester_cache(request.block_roots.into(), &executor)
.get_blocks_checking_early_attester_cache(request.block_roots().to_vec(), &executor)
{
Ok(block_stream) => block_stream,
Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e),
@@ -375,27 +375,22 @@ impl<T: BeaconChainTypes> Worker<T> {
send_on_drop: SendOnDrop,
peer_id: PeerId,
request_id: PeerRequestId,
req: BlocksByRangeRequest,
mut req: BlocksByRangeRequest,
) {
debug!(self.log, "Received BlocksByRange Request";
"peer_id" => %peer_id,
"count" => req.count,
"start_slot" => req.start_slot,
"count" => req.count(),
"start_slot" => req.start_slot(),
);

// Should not send more than max request blocks
if req.count > MAX_REQUEST_BLOCKS {
return self.send_error_response(
peer_id,
RPCResponseErrorCode::InvalidRequest,
"Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`".into(),
request_id,
);
if *req.count() > MAX_REQUEST_BLOCKS {
*req.count_mut() = MAX_REQUEST_BLOCKS;
}

let forwards_block_root_iter = match self
.chain
.forwards_iter_block_roots(Slot::from(req.start_slot))
.forwards_iter_block_roots(Slot::from(*req.start_slot()))
{
Ok(iter) => iter,
Err(BeaconChainError::HistoricalBlockError(
@@ -432,7 +427,7 @@ impl<T: BeaconChainTypes> Worker<T> {

// Pick out the required blocks, ignoring skip-slots.
let mut last_block_root = req
.start_slot
.start_slot()
.checked_sub(1)
.map(|prev_slot| {
self.chain
@@ -443,7 +438,9 @@ impl<T: BeaconChainTypes> Worker<T> {
.flatten()
.flatten();
let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
iter.take_while(|(_, slot)| {
slot.as_u64() < req.start_slot().saturating_add(*req.count())
})
// map skip slots to None
.map(|(root, _)| {
let result = if Some(root) == last_block_root {
@@ -487,8 +484,8 @@ impl<T: BeaconChainTypes> Worker<T> {
Ok(Some(block)) => {
// Due to skip slots, blocks could be out of the range, we ensure they
// are in the range before sending
if block.slot() >= req.start_slot
&& block.slot() < req.start_slot + req.count
if block.slot() >= *req.start_slot()
&& block.slot() < req.start_slot() + req.count()
{
blocks_sent += 1;
self.send_network_message(NetworkMessage::SendResponse {
@@ -572,15 +569,15 @@ impl<T: BeaconChainTypes> Worker<T> {
.slot()
.unwrap_or_else(|_| self.chain.slot_clock.genesis_slot());

if blocks_sent < (req.count as usize) {
if blocks_sent < (*req.count() as usize) {
debug!(
self.log,
"BlocksByRange outgoing response processed";
"peer" => %peer_id,
"msg" => "Failed to return all requested blocks",
"start_slot" => req.start_slot,
"start_slot" => req.start_slot(),
"current_slot" => current_slot,
"requested" => req.count,
"requested" => req.count(),
"returned" => blocks_sent
);
} else {
@@ -588,9 +585,9 @@ impl<T: BeaconChainTypes> Worker<T> {
self.log,
"BlocksByRange outgoing response processed";
"peer" => %peer_id,
"start_slot" => req.start_slot,
"start_slot" => req.start_slot(),
"current_slot" => current_slot,
"requested" => req.count,
"requested" => req.count(),
"returned" => blocks_sent
);
}

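Behavioural note on the hunk above: an over-sized `BlocksByRange` request is no longer rejected with an `InvalidRequest` error; its count is clamped and the request is served. An illustrative sketch of the new semantics, using the superstruct-generated `count_mut()` setter seen in the diff:

```rust
// Illustrative only: a request for more than MAX_REQUEST_BLOCKS blocks is now
// served with a clamped count rather than rejected.
let mut req = BlocksByRangeRequest::new(0, MAX_REQUEST_BLOCKS + 1);
if *req.count() > MAX_REQUEST_BLOCKS {
    *req.count_mut() = MAX_REQUEST_BLOCKS;
}
assert_eq!(*req.count(), MAX_REQUEST_BLOCKS);
```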
@@ -10,12 +10,12 @@ use crate::sync::{BatchProcessResult, ChainId};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock};
use beacon_chain::data_availability_checker::AvailabilityCheckError;
use beacon_chain::AvailabilityProcessingStatus;
use beacon_chain::{
observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
NotifyExecutionLayer,
};
use beacon_chain::{AvailabilityProcessingStatus, CountUnrealized};
use lighthouse_network::PeerAction;
use slog::{debug, error, info, warn};
use slot_clock::SlotClock;
@@ -28,7 +28,7 @@ use types::{Epoch, Hash256};
#[derive(Clone, Debug, PartialEq)]
pub enum ChainSegmentProcessId {
/// Processing Id of a range syncing batch.
RangeBatchId(ChainId, Epoch, CountUnrealized),
RangeBatchId(ChainId, Epoch),
/// Processing ID for a backfill syncing batch.
BackSyncBatchId(Epoch),
/// Processing Id of the parent lookup of a block.
@@ -171,12 +171,7 @@ impl<T: BeaconChainTypes> Worker<T> {

let result = self
.chain
.process_block(
block_root,
block,
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.process_block(block_root, block, NotifyExecutionLayer::Yes)
.await;

metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -233,15 +228,11 @@ impl<T: BeaconChainTypes> Worker<T> {

let result = self
.chain
.check_availability_and_maybe_import(
slot,
|chain| {
.check_availability_and_maybe_import(slot, |chain| {
chain
.data_availability_checker
.put_rpc_blobs(block_root, blobs)
},
CountUnrealized::True,
)
})
.await;

// Sync handles these results
@@ -262,17 +253,13 @@ impl<T: BeaconChainTypes> Worker<T> {
) {
let result = match sync_type {
// this is a request from the range sync
ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => {
ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
let sent_blocks = downloaded_blocks.len();

match self
.process_blocks(
downloaded_blocks.iter(),
count_unrealized,
notify_execution_layer,
)
.process_blocks(downloaded_blocks.iter(), notify_execution_layer)
.await
{
(_, Ok(_)) => {
@@ -357,11 +344,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// parent blocks are ordered from highest slot to lowest, so we need to process in
// reverse
match self
.process_blocks(
downloaded_blocks.iter().rev(),
CountUnrealized::True,
notify_execution_layer,
)
.process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer)
.await
{
(imported_blocks, Err(e)) => {
@@ -391,13 +374,12 @@ impl<T: BeaconChainTypes> Worker<T> {
async fn process_blocks<'a>(
&self,
downloaded_blocks: impl Iterator<Item = &'a BlockWrapper<T::EthSpec>>,
count_unrealized: CountUnrealized,
notify_execution_layer: NotifyExecutionLayer,
) -> (usize, Result<(), ChainSegmentFailed>) {
let blocks: Vec<_> = downloaded_blocks.cloned().collect();
match self
.chain
.process_chain_segment(blocks, count_unrealized, notify_execution_layer)
.process_chain_segment(blocks, notify_execution_layer)
.await
{
ChainSegmentResult::Successful { imported_blocks } => {

@@ -491,9 +491,9 @@ impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS
self.block_request_state.state.state,
State::AwaitingDownload
));
let request = BlocksByRootRequest {
block_roots: VariableList::from(vec![self.block_request_state.requested_block_root]),
};
let request = BlocksByRootRequest::new(VariableList::from(vec![
self.block_request_state.requested_block_root,
]));
let response_type = ResponseType::Block;
if self.too_many_attempts(response_type) {
Err(LookupRequestError::TooManyAttempts {

@@ -707,7 +707,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.parent_block_processed(chain_hash, result, response_type, &mut self.network),
},
SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
self.range_sync.handle_block_process_result(
&mut self.network,
chain_id,

@@ -162,7 +162,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending BlocksByRange request";
"method" => "BlocksByRange",
"count" => request.count,
"count" => request.count(),
"peer" => %peer_id,
);
let request = Request::BlocksByRange(request);
@@ -181,7 +181,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending BlocksByRange and BlobsByRange requests";
"method" => "Mixed by range request",
"count" => request.count,
"count" => request.count(),
"peer" => %peer_id,
);

@@ -191,8 +191,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {

// Create the blob request based on the blocks request.
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
start_slot: request.start_slot,
count: request.count,
start_slot: *request.start_slot(),
count: *request.count(),
});
let blocks_request = Request::BlocksByRange(request);

@@ -235,7 +235,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending backfill BlocksByRange request";
"method" => "BlocksByRange",
"count" => request.count,
"count" => request.count(),
"peer" => %peer_id,
);
let request = Request::BlocksByRange(request);
@@ -254,7 +254,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending backfill BlocksByRange and BlobsByRange requests";
"method" => "Mixed by range request",
"count" => request.count,
"count" => request.count(),
"peer" => %peer_id,
);

@@ -264,8 +264,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {

// Create the blob request based on the blocks request.
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
start_slot: request.start_slot,
count: request.count,
start_slot: *request.start_slot(),
count: *request.count(),
});
let blocks_request = Request::BlocksByRange(request);

@@ -422,7 +422,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending BlocksByRoot Request";
"method" => "BlocksByRoot",
"count" => request.block_roots.len(),
"count" => request.block_roots().len(),
"peer" => %peer_id
);

@@ -472,7 +472,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
self.log,
"Sending parent BlocksByRoot Request";
"method" => "BlocksByRoot",
"count" => request.block_roots.len(),
"count" => request.block_roots().len(),
"peer" => %peer_id
);

@@ -220,10 +220,10 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
/// Returns a BlocksByRange request associated with the batch.
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) {
(
BlocksByRangeRequest {
start_slot: self.start_slot.into(),
count: self.end_slot.sub(self.start_slot).into(),
},
BlocksByRangeRequest::new(
self.start_slot.into(),
self.end_slot.sub(self.start_slot).into(),
),
self.batch_type,
)
}

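A sketch of what the updated method produces for a batch; `batch_info` and the slot numbers are hypothetical, and `ByRangeRequestType` is the batch marker returned alongside the request:

```rust
// For a batch covering slots [64, 96), the request asks for 32 blocks starting
// at slot 64, now built through the V2-defaulting constructor rather than a
// struct literal.
let (request, _batch_type) = batch_info.to_blocks_by_range_request();
assert_eq!(*request.start_slot(), 64);
assert_eq!(*request.count(), 32);
```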
@@ -4,7 +4,7 @@ use crate::sync::{
manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChainTypes, CountUnrealized};
use beacon_chain::BeaconChainTypes;
use fnv::FnvHashMap;
use lighthouse_network::{PeerAction, PeerId};
use rand::seq::SliceRandom;
@@ -101,8 +101,6 @@ pub struct SyncingChain<T: BeaconChainTypes> {
/// Batches validated by this chain.
validated_batches: u64,

is_finalized_segment: bool,

/// The chain's log.
log: slog::Logger,
}
@@ -128,7 +126,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
target_head_slot: Slot,
target_head_root: Hash256,
peer_id: PeerId,
is_finalized_segment: bool,
log: &slog::Logger,
) -> Self {
let mut peers = FnvHashMap::default();
@@ -136,16 +133,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {

let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);

let target_slot = if is_finalized_segment {
target_head_slot + (2 * T::EthSpec::slots_per_epoch()) + 1
} else {
target_head_slot
};

SyncingChain {
id,
start_epoch,
target_head_slot: target_slot,
target_head_slot,
target_head_root,
batches: BTreeMap::new(),
peers,
@@ -156,7 +147,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
state: ChainSyncingState::Stopped,
current_processing_batch: None,
validated_batches: 0,
is_finalized_segment,
log: log.new(o!("chain" => id)),
}
}
@@ -324,12 +314,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
// for removing chains and checking completion is in the callback.

let blocks = batch.start_processing()?;
let count_unrealized = if self.is_finalized_segment {
CountUnrealized::False
} else {
CountUnrealized::True
};
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id);
self.current_processing_batch = Some(batch_id);

let work_event = BeaconWorkEvent::chain_segment(process_id, blocks);

@@ -465,10 +465,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
network: &mut SyncNetworkContext<T>,
) {
let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type {
(&mut self.finalized_chains, true)
let collection = if let RangeSyncType::Finalized = sync_type {
&mut self.finalized_chains
} else {
(&mut self.head_chains, false)
&mut self.head_chains
};
match collection.entry(id) {
Entry::Occupied(mut entry) => {
@@ -493,7 +493,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
target_head_slot,
target_head_root,
peer,
is_finalized,
&self.log,
);
debug_assert_eq!(new_chain.get_id(), id);

@@ -123,7 +123,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.value_name("PORT")
.help("The UDP port that discovery will listen on over IpV6 if listening over \
both Ipv4 and IpV6. Defaults to `port6`")
.hidden(true) // TODO: implement dual stack via two sockets in discv5.
.takes_value(true),
)
.arg(
@@ -205,7 +204,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
discovery. Set this only if you are sure other nodes can connect to your \
local node on this address. This will update the `ip4` or `ip6` ENR fields \
accordingly. To update both, set this flag twice with the different values.")
.requires("enr-udp-port")
.multiple(true)
.max_values(2)
.takes_value(true),
@@ -729,7 +727,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
Arg::with_name("max-skip-slots")
.long("max-skip-slots")
.help(
"Refuse to skip more than this many slots when processing a block or attestation. \
"Refuse to skip more than this many slots when processing an attestation. \
This prevents nodes on minority forks from wasting our time and disk space, \
but could also cause unnecessary consensus failures, so is disabled by default."
)

@@ -37,7 +37,7 @@
\rput[bl](9.0,-3.49){27.3 hours}
\rput[bl](8.8,-5.49){Varying time}
\rput[bl](8.7,-5.99){validator sweep}
\rput[bl](8.9,-6.59){up to 5 days}
\rput[bl](8.9,-6.59){up to \textit{n} days}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29)

@@ -31,7 +31,7 @@
\rput[bl](0.9,-1.59){Beacon chain}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09)
\rput[bl](7.6,-3.99){validator sweep}
\rput[bl](7.5,-4.69){$\sim$ every 5 days}
\rput[bl](7.82,-4.73){every \textit{n} days}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09)
\rput[bl](1.3,-4.09){BLS to}
\rput[bl](0.5,-4.69){execution change}

@@ -47,7 +47,7 @@
* [Running a Slasher](./slasher.md)
* [Redundancy](./redundancy.md)
* [Release Candidates](./advanced-release-candidates.md)
* [Maximal Extractable Value (MEV)](./builders.md)
* [MEV](./builders.md)
* [Merge Migration](./merge-migration.md)
* [Late Block Re-orgs](./late-block-re-orgs.md)
* [Contributing](./contributing.md)

@@ -28,7 +28,7 @@ some example values.
| Research | 32 | 3.4 TB | 155 ms |
| Block explorer/analysis | 128 | 851 GB | 620 ms |
| Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s |
| EHobbyist | 4096 | 26.8 GB | 20.5 s |
| Hobbyist | 4096 | 26.8 GB | 20.5 s |
| Validator only (default) | 8192 | 8.1 GB | 41 s |

*Last update: May 2023.

@ -38,7 +38,6 @@ large peer count will not speed up sync.
For these reasons, we recommend users do not modify the `--target-peers` count
drastically and use the (recommended) default.

### NAT Traversal (Port Forwarding)

Lighthouse, by default, uses port 9000 for both TCP and UDP. Lighthouse will
@ -107,3 +106,78 @@ Modifying the ENR settings can degrade the discovery of your node, making it
harder for peers to find you or potentially making it harder for other peers to
find each other. We recommend not touching these settings unless for a more
advanced use case.

### IPv6 support

As noted in the previous sections, two fundamental parts to ensure good
connectivity are: the parameters that configure the sockets over which
Lighthouse listens for connections, and the parameters used to tell other peers
how to connect to your node. This distinction is relevant and applies to most
nodes that do not run directly on a public network.

#### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack

To listen only over IPv6, use the same parameters as when listening only over
IPv4:

- `--listen-address :: --port 9909` will listen over IPv6 using port `9909` for
  TCP and UDP.
- `--listen-address :: --port 9909 --discovery-port 9999` will listen over
  IPv6 using port `9909` for TCP and port `9999` for UDP.

To listen over both IPv4 and IPv6:
- Set two listening addresses using the `--listen-address` flag twice, ensuring
  that one address is IPv4 and the other IPv6. When doing so, the
  `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note
  that this behaviour differs from the IPv6-only case described above.
- If necessary, set the `--port6` flag to configure the port used for TCP and
  UDP over IPv6. This flag has no effect when listening over IPv6 only.
- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP
  port. This will default to the value given to `--port6` if not set. This flag
  has no effect when listening over IPv6 only.

##### Configuration Examples

- `--listen-address :: --listen-address 0.0.0.0 --port 9909` will listen
  over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but
  using the default value for `--port6` for UDP and TCP (`9090`).
- `--listen-address :: --listen-address 0.0.0.0 --port 9909 --discovery-port6 9999`
  will have the same configuration as before except for the IPv6 UDP socket,
  which will use port `9999`.
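
Putting these flags together, a dual-stack invocation might look as follows (a
sketch only; the port values are illustrative, not recommendations):

```bash
# Listen on all IPv4 and IPv6 interfaces; IPv4 uses the default port 9000,
# IPv6 uses 9909 for TCP and 9999 for discovery (UDP).
lighthouse bn \
  --listen-address 0.0.0.0 \
  --listen-address :: \
  --port 9000 \
  --port6 9909 \
  --discovery-port6 9999
```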

#### Configuring Lighthouse to advertise IPv6 reachable addresses

Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively,
and dual stack using one socket for IPv4 and another socket for IPv6. In both
scenarios, the previous sections still apply. In summary:

> Beacon nodes must advertise their publicly reachable socket address

In order to do so, Lighthouse provides the following CLI options/parameters.

- `--enr-udp-port` Use this to advertise the port that is publicly reachable
  over UDP with a publicly reachable IPv4 address. This might differ from the
  IPv4 port used to listen.
- `--enr-udp6-port` Use this to advertise the port that is publicly reachable
  over UDP with a publicly reachable IPv6 address. This might differ from the
  IPv6 port used to listen.
- `--enr-tcp-port` Use this to advertise the port that is publicly reachable
  over TCP with a publicly reachable IPv4 address. This might differ from the
  IPv4 port used to listen.
- `--enr-tcp6-port` Use this to advertise the port that is publicly reachable
  over TCP with a publicly reachable IPv6 address. This might differ from the
  IPv6 port used to listen.
- `--enr-address` Use this to advertise publicly reachable addresses. Takes at
  most two values, one for IPv4 and one for IPv6. Note that a beacon node that
  advertises an address must be reachable over both UDP and TCP.

In the general case, a user will not need to set these explicitly. Update
these options only if you can guarantee your node is reachable with these
values.
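
For illustration, a node with known static public addresses might advertise
itself like this (a sketch; the addresses are documentation placeholders, and
the ports assume the defaults described above):

```bash
# Hypothetical example: advertise fixed public IPv4 and IPv6 addresses in the
# ENR. Only set these if the node is genuinely reachable at these values.
lighthouse bn \
  --enr-address 203.0.113.7 \
  --enr-address 2001:db8::7 \
  --enr-udp-port 9000 \
  --enr-tcp-port 9000 \
  --enr-udp6-port 9090 \
  --enr-tcp6-port 9090
```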

#### Known caveats

IPv6 link-local addresses are likely to have poor connectivity if used in
topologies with more than one interface. Use global addresses for the general
case.

@ -426,7 +426,8 @@ Example Response Body

## `PATCH /lighthouse/validators/:voting_pubkey`

Update some values for the validator with `voting_pubkey`. The following example updates a validator from `enabled: true` to `enabled: false`
Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`,
and `graffiti`. The following example updates a validator from `enabled: true` to `enabled: false`.

### HTTP Specification

@ -108,13 +108,14 @@ Command:

```bash
DATADIR=/var/lib/lighthouse
curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \
-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)" \
-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \
-H "Content-Type: application/json" \
-d '{
    "builder_proposals": true,
    "gas_limit": 30000001
}' | jq
```
If you have permission issues accessing the API token file, you can modify the header to `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`.

#### Example Response Body

@ -25,10 +25,11 @@

## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1)
- [I have a low peer count and it is not increasing](#net-peer)
- [How do I update lighthouse?](#net-update)
- [Do I need to set up any port mappings (port forwarding)?](#net-port)
- [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding)
- [How can I monitor my validators?](#net-monitor)
- [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc)
- [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip)
- [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port)

## [Miscellaneous](#miscellaneous-1)

@ -360,7 +361,7 @@ $ docker pull sigp/lighthouse:v1.0.0
If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image)
You just need to make sure the code you have checked out is up to date.

### <a name="net-port"></a> Do I need to set up any port mappings (port forwarding)?
### <a name="net-port-forwarding"></a> Do I need to set up any port mappings (port forwarding)?

It is not strictly required to open any ports for Lighthouse to connect and
participate in the network. Lighthouse should work out-of-the-box. However, if
@ -386,7 +387,7 @@ For these reasons, we recommend that you make your node publicly accessible.

Lighthouse supports UPnP. If you are behind a NAT with a router that supports
UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its
initial logs if a route has been established). You can also manually [set up port mappings](./advanced_networking.md) in your router to your local Lighthouse instance. By default,
initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md#how-to-open-ports) in your router to your local Lighthouse instance. By default,
Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will
make your Lighthouse node maximally contactable.

@ -421,6 +422,9 @@ The settings are as follows:

### <a name="net-ip"></a> Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?
No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (a flag that is not used in common setups).

### <a name="net-port"></a> How to change the TCP/UDP port 9000 that Lighthouse listens on?
Use the flag ```--port <PORT>``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node on the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```.
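
For example, a second instance might be started like this (a sketch; the second
data directory is a placeholder to keep the two nodes from sharing state):

```bash
# First node keeps the default port 9000; the second listens on 9001.
lighthouse bn --port 9001 --datadir /var/lib/lighthouse2
```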

## Miscellaneous

### <a name="misc-slashing"></a> What should I do if I lose my slashing protection database?

@ -29,6 +29,8 @@ Lighthouse will first search for the graffiti corresponding to the public key of
### 2. Setting the graffiti in the `validator_definitions.yml`
Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal.

You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http).

Below is an example of the validator_definitions.yml with validator specific graffitis:
```
---
@ -62,3 +64,25 @@ Usage: `lighthouse bn --graffiti fortytwo`
> 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client.
> 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node.
> 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti.

### Set Graffiti via HTTP

Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti
both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal
without requiring a validator client restart.

Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification.

#### Example Command

```bash
DATADIR=/var/lib/lighthouse
curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \
-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \
-H "Content-Type: application/json" \
-d '{
    "graffiti": "Mr F was here"
}' | jq
```

A `null` response indicates that the request is successful.
Binary file not shown. (Size: 257 KiB before and after.)
Binary file not shown. (Size: 172 KiB before and after.)
@ -16,7 +16,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12<sup>

3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`?

No. The "validator sweep" occurs automatically and you can expect to receive the rewards every few days.
No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01).

The figure below summarizes partial withdrawals.

@ -43,13 +43,12 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign
Staying silent and refusing to sign messages will cause the following:

- 2-3 missed attestations, incurring penalties and missed rewards.
- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards.
- Potentially missed rewards by missing a block proposal (if the validator is an elected block
  proposer, which is unlikely).

The loss of rewards and penalties incurred due to the missed duties will be very small in
dollar-values. Generally, they will equate to around one US dollar (at August 2021 figures) or about
2% of the reward for one validator for one day. Since DP costs so little but can protect a user from
dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than
1% of the reward for one validator for one day. Since DP costs so little but can protect a user from
slashing, many users will consider this a worthwhile trade-off.

The 2-3 epochs of missed duties will be incurred whenever the VC is started (e.g., after an update

@ -97,7 +97,26 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. To check which

- A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable.

- A varying time of "validator sweep" that can take up to 5 days (at the time of writing with ~560,000 validators on the mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
- A varying time of "validator sweep" that can take up to *n* days with *n* listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.

<div align="center">

| Number of eligible validators | Ideal scenario *n* | Practical scenario *n* |
|:----------------:|:---------------------:|:----:|
| 300000 | 2.60 | 2.63 |
| 400000 | 3.47 | 3.51 |
| 500000 | 4.34 | 4.38 |
| 600000 | 5.21 | 5.26 |
| 700000 | 6.08 | 6.14 |
| 800000 | 6.94 | 7.01 |
| 900000 | 7.81 | 7.89 |
| 1000000 | 8.68 | 8.77 |

</div>

> Note: Ideal scenario assumes no block proposals are missed. This means a total of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. Practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days.
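
As a sanity check on the table, the waiting time follows directly from the
throughput stated in the note. With *N* eligible validators:

```latex
n_{\text{ideal}} = \frac{N}{7200 \times 16} = \frac{N}{115200}, \qquad
n_{\text{practical}} = \frac{n_{\text{ideal}}}{0.99}
% e.g. N = 700000: 700000/115200 = 6.08 days (ideal), 6.08/0.99 = 6.14 days
```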

The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address.

@ -13,13 +13,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.settings(&[clap::AppSettings::ColoredHelp])
.arg(
Arg::with_name("enr-address")
.value_name("IP-ADDRESS")
.help("The external IP address/ DNS address to broadcast to other peers on how to reach this node. \
If a DNS address is provided, the enr-address is set to the IP address it resolves to and \
does not auto-update based on PONG responses in discovery.")
.long("enr-address")
.value_name("ADDRESS")
.help("The IP address/ DNS address to broadcast to other peers on how to reach \
this node. If a DNS address is provided, the enr-address is set to the IP \
address it resolves to and does not auto-update based on PONG responses in \
discovery. Set this only if you are sure other nodes can connect to your \
local node on this address. This will update the `ip4` or `ip6` ENR fields \
accordingly. To update both, set this flag twice with the different values.")
.multiple(true)
.max_values(2)
.required(true)
.takes_value(true)
.conflicts_with("network-dir")
.takes_value(true),
)
.arg(
Arg::with_name("port")
@ -29,11 +35,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.default_value("9000")
.takes_value(true)
)
.arg(
Arg::with_name("port6")
.long("port6")
.value_name("PORT")
.help("The UDP port to listen on over IpV6 when listening over both Ipv4 and \
Ipv6. Defaults to 9090 when required.")
.default_value("9090")
.takes_value(true),
)
.arg(
Arg::with_name("listen-address")
.long("listen-address")
.value_name("ADDRESS")
.help("The address the bootnode will listen for UDP connections.")
.help("The address the bootnode will listen for UDP communications. To listen \
over IpV4 and IpV6 set this flag twice with the different values.\n\
Examples:\n\
- --listen-address '0.0.0.0' will listen over Ipv4.\n\
- --listen-address '::' will listen over Ipv6.\n\
- --listen-address '0.0.0.0' --listen-address '::' will listen over both \
Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \
multiple Ipv4, or multiple Ipv6 addresses will not be accepted.")
.multiple(true)
.max_values(2)
.default_value("0.0.0.0")
.takes_value(true)
)
@ -59,6 +83,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.value_name("PORT")
.help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \
can connect to your local node on this port over IpV6.")
.conflicts_with("network-dir")
.takes_value(true),
)
.arg(
@ -77,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
Arg::with_name("network-dir")
.value_name("NETWORK_DIR")
.long("network-dir")
.help("The directory which contains the enr and it's assoicated private key")
.help("The directory which contains the enr and it's associated private key")
.takes_value(true)
)
}
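
Taken together, the updated bootnode flags could be exercised like this (a
sketch of a dual-stack invocation; the public addresses are placeholders, and
it assumes the `lighthouse boot_node` subcommand forwards these arguments
unchanged):

```bash
# Hypothetical: listen on both stacks and advertise one ENR address per stack.
lighthouse boot_node \
  --listen-address 0.0.0.0 \
  --listen-address :: \
  --port 9000 \
  --port6 9090 \
  --enr-address 203.0.113.7 \
  --enr-address 2001:db8::7
```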

@ -2,7 +2,6 @@ use beacon_node::{get_data_dir, set_network_config};
use clap::ArgMatches;
use eth2_network_config::Eth2NetworkConfig;
use lighthouse_network::discovery::create_enr_builder_from_config;
use lighthouse_network::discv5::IpMode;
use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr};
use lighthouse_network::{
discovery::{load_enr_from_disk, use_or_load_enr},
@ -10,13 +9,12 @@ use lighthouse_network::{
};
use serde_derive::{Deserialize, Serialize};
use ssz::Encode;
use std::net::SocketAddr;
use std::net::{SocketAddrV4, SocketAddrV6};
use std::{marker::PhantomData, path::PathBuf};
use types::EthSpec;

/// A set of configuration parameters for the bootnode, established from CLI arguments.
pub struct BootNodeConfig<T: EthSpec> {
pub listen_socket: SocketAddr,
// TODO: Generalise to multiaddr
pub boot_nodes: Vec<Enr>,
pub local_enr: Enr,
@ -81,31 +79,6 @@ impl<T: EthSpec> BootNodeConfig<T> {
network_config.discv5_config.enr_update = false;
}

// the address to listen on
let listen_socket = match network_config.listen_addrs().clone() {
lighthouse_network::ListenAddress::V4(v4_addr) => {
// Set explicitly as ipv4 otherwise
network_config.discv5_config.ip_mode = IpMode::Ip4;
v4_addr.udp_socket_addr()
}
lighthouse_network::ListenAddress::V6(v6_addr) => {
// create ipv6 sockets and enable ipv4 mapped addresses.
network_config.discv5_config.ip_mode = IpMode::Ip6 {
enable_mapped_addresses: false,
};

v6_addr.udp_socket_addr()
}
lighthouse_network::ListenAddress::DualStack(_v4_addr, v6_addr) => {
// create ipv6 sockets and enable ipv4 mapped addresses.
network_config.discv5_config.ip_mode = IpMode::Ip6 {
enable_mapped_addresses: true,
};

v6_addr.udp_socket_addr()
}
};

let private_key = load_private_key(&network_config, &logger);
let local_key = CombinedKey::from_libp2p(&private_key)?;

@ -143,7 +116,7 @@ impl<T: EthSpec> BootNodeConfig<T> {
let mut builder = create_enr_builder_from_config(&network_config, enable_tcp);
// If we know of the ENR field, add it to the initial construction
if let Some(enr_fork_bytes) = enr_fork {
builder.add_value("eth2", enr_fork_bytes.as_slice());
builder.add_value("eth2", &enr_fork_bytes);
}
builder
.build(&local_key)
@ -155,7 +128,6 @@ impl<T: EthSpec> BootNodeConfig<T> {
};

Ok(BootNodeConfig {
listen_socket,
boot_nodes,
local_enr,
local_key,
@ -170,7 +142,8 @@ impl<T: EthSpec> BootNodeConfig<T> {
/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`.
#[derive(Serialize, Deserialize)]
pub struct BootNodeConfigSerialization {
pub listen_socket: SocketAddr,
pub ipv4_listen_socket: Option<SocketAddrV4>,
pub ipv6_listen_socket: Option<SocketAddrV6>,
// TODO: Generalise to multiaddr
pub boot_nodes: Vec<Enr>,
pub local_enr: Enr,
@ -183,7 +156,6 @@ impl BootNodeConfigSerialization {
/// relevant fields of `config`
pub fn from_config_ref<T: EthSpec>(config: &BootNodeConfig<T>) -> Self {
let BootNodeConfig {
listen_socket,
boot_nodes,
local_enr,
local_key: _,
@ -191,8 +163,27 @@ impl BootNodeConfigSerialization {
phantom: _,
} = config;

let (ipv4_listen_socket, ipv6_listen_socket) = match discv5_config.listen_config {
lighthouse_network::discv5::ListenConfig::Ipv4 { ip, port } => {
(Some(SocketAddrV4::new(ip, port)), None)
}
lighthouse_network::discv5::ListenConfig::Ipv6 { ip, port } => {
(None, Some(SocketAddrV6::new(ip, port, 0, 0)))
}
lighthouse_network::discv5::ListenConfig::DualStack {
ipv4,
ipv4_port,
ipv6,
ipv6_port,
} => (
Some(SocketAddrV4::new(ipv4, ipv4_port)),
Some(SocketAddrV6::new(ipv6, ipv6_port, 0, 0)),
),
};

BootNodeConfigSerialization {
listen_socket: *listen_socket,
ipv4_listen_socket,
ipv6_listen_socket,
boot_nodes: boot_nodes.clone(),
local_enr: local_enr.clone(),
disable_packet_filter: !discv5_config.enable_packet_filter,

@ -10,7 +10,6 @@ use types::EthSpec;

pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
let BootNodeConfig {
listen_socket,
boot_nodes,
local_enr,
local_key,
@ -31,7 +30,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string());
info!(
log, "Configuration parameters";
"listening_address" => %listen_socket,
"listening_address" => ?discv5_config.listen_config,
"advertised_v4_address" => ?pretty_v4_socket,
"advertised_v6_address" => ?pretty_v6_socket,
"eth2" => eth2_field
@ -41,6 +40,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {

// build the contactable multiaddr list, adding the p2p protocol
info!(log, "Contact information"; "enr" => local_enr.to_base64());
info!(log, "Enr details"; "enr" => ?local_enr);
info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p());

// construct the discv5 server
@ -64,7 +64,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
}

// start the server
if let Err(e) = discv5.start(listen_socket).await {
if let Err(e) = discv5.start().await {
slog::crit!(log, "Could not start discv5 server"; "error" => %e);
return;
}

@ -10,7 +10,7 @@ edition = "2021"
serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58"
types = { path = "../../consensus/types" }
reqwest = { version = "0.11.0", features = ["json","stream"] }
reqwest = { version = "0.11.0", features = ["json", "stream"] }
lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
proto_array = { path = "../../consensus/proto_array", optional = true }
ethereum_serde_utils = "0.5.0"
@ -26,7 +26,7 @@ futures-util = "0.3.8"
futures = "0.3.8"
store = { path = "../../beacon_node/store", optional = true }
slashing_protection = { path = "../../validator_client/slashing_protection", optional = true }
mime = "0.3.16"
mediatype = "0.19.13"

[target.'cfg(target_os = "linux")'.dependencies]
psutil = { version = "3.2.2", optional = true }
@ -34,4 +34,10 @@ procinfo = { version = "0.4.2", optional = true }

[features]
default = ["lighthouse"]
lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
lighthouse = [
"proto_array",
"psutil",
"procinfo",
"store",
"slashing_protection",
]

@ -218,7 +218,11 @@ impl BeaconNodeHttpClient {

/// Perform a HTTP GET request, returning `None` on a 404 error.
async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> {
match self.get_response(url, |b| b).await.optional()? {
match self
.get_response(url, |b| b.accept(Accept::Json))
.await
.optional()?
{
Some(response) => Ok(Some(response.json().await?)),
None => Ok(None),
}
@ -231,7 +235,7 @@ impl BeaconNodeHttpClient {
timeout: Duration,
) -> Result<Option<T>, Error> {
let opt_response = self
.get_response(url, |b| b.timeout(timeout))
.get_response(url, |b| b.timeout(timeout).accept(Accept::Json))
.await
.optional()?;
match opt_response {
@ -1010,16 +1014,14 @@ impl BeaconNodeHttpClient {

/// `GET beacon/deposit_snapshot`
pub async fn get_deposit_snapshot(&self) -> Result<Option<types::DepositTreeSnapshot>, Error> {
use ssz::Decode;
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("deposit_snapshot");
self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot)
.await?
.map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
.transpose()
self.get_opt_with_timeout::<GenericResponse<_>, _>(path, self.timeouts.get_deposit_snapshot)
.await
.map(|opt| opt.map(|r| r.data))
}
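
For reference, the endpoint this method targets can be queried directly; a
hedged sketch against a local beacon node (the default HTTP API port 5052 is
assumed):

```bash
# Fetch the deposit tree snapshot as JSON, mirroring the client's new
# Accept header behaviour.
curl -H "Accept: application/json" \
  "http://localhost:5052/eth/v1/beacon/deposit_snapshot" | jq
```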

/// `POST beacon/rewards/sync_committee`

@ -16,6 +16,7 @@ use std::path::Path;

pub use reqwest;
pub use reqwest::{Response, StatusCode, Url};
use types::graffiti::GraffitiString;

/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a
/// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`).
@ -467,6 +468,7 @@ impl ValidatorClientHttpClient {
enabled: Option<bool>,
gas_limit: Option<u64>,
builder_proposals: Option<bool>,
graffiti: Option<GraffitiString>,
) -> Result<(), Error> {
let mut path = self.server.full.clone();

@ -482,6 +484,7 @@ impl ValidatorClientHttpClient {
enabled,
gas_limit,
builder_proposals,
graffiti,
},
)
.await

@ -83,6 +83,9 @@ pub struct ValidatorPatchRequest {
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
pub builder_proposals: Option<bool>,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
pub graffiti: Option<GraffitiString>,
}

#[derive(Clone, PartialEq, Serialize, Deserialize)]

@ -3,10 +3,9 @@

use crate::Error as ServerError;
use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus};
use mime::{Mime, APPLICATION, JSON, OCTET_STREAM, STAR};
use mediatype::{names, MediaType, MediaTypeList};
use serde::{Deserialize, Serialize};
use ssz_derive::Encode;
use std::cmp::Reverse;
use std::convert::TryFrom;
use std::fmt;
use std::str::{from_utf8, FromStr};
@ -1173,35 +1172,58 @@ impl FromStr for Accept {
type Err = String;

fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut mimes = parse_accept(s)?;
let media_type_list = MediaTypeList::new(s);

// [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2
// find the highest q-factor supported accept type
mimes.sort_by_key(|m| {
Reverse(m.get_param("q").map_or(1000_u16, |n| {
(n.as_ref().parse::<f32>().unwrap_or(0_f32) * 1000_f32) as u16
}))
});
mimes
.into_iter()
.find_map(|m| match (m.type_(), m.subtype()) {
let mut highest_q = 0_u16;
let mut accept_type = None;

const APPLICATION: &str = names::APPLICATION.as_str();
const OCTET_STREAM: &str = names::OCTET_STREAM.as_str();
const JSON: &str = names::JSON.as_str();
const STAR: &str = names::_STAR.as_str();
const Q: &str = names::Q.as_str();

media_type_list.into_iter().for_each(|item| {
if let Ok(MediaType {
ty,
subty,
suffix: _,
params,
}) = item
{
let q_accept = match (ty.as_str(), subty.as_str()) {
(APPLICATION, OCTET_STREAM) => Some(Accept::Ssz),
(APPLICATION, JSON) => Some(Accept::Json),
(STAR, STAR) => Some(Accept::Any),
_ => None,
})
.ok_or_else(|| "accept header is not supported".to_string())
}
}

fn parse_accept(accept: &str) -> Result<Vec<Mime>, String> {
accept
.split(',')
.map(|part| {
part.parse()
.map_err(|e| format!("error parsing Accept header: {}", e))
.map(|item_accept_type| {
let q_val = params
.iter()
.find_map(|(n, v)| match n.as_str() {
Q => {
Some((v.as_str().parse::<f32>().unwrap_or(0_f32) * 1000_f32) as u16)
}
_ => None,
})
.collect()
.or(Some(1000_u16));

(q_val.unwrap(), item_accept_type)
});

match q_accept {
Some((q, accept)) if q > highest_q => {
highest_q = q;
accept_type = Some(accept);
}
_ => (),
}
}
});
accept_type.ok_or_else(|| "accept header is not supported".to_string())
}
}

#[derive(Debug, Serialize, Deserialize)]
@ -1269,7 +1291,12 @@ mod tests {
assert_eq!(
Accept::from_str("text/plain"),
Err("accept header is not supported".to_string())
)
);

assert_eq!(
Accept::from_str("application/json;message=\"Hello, world!\";q=0.3,*/*;q=0.6").unwrap(),
Accept::Any
);
}
}

@ -20,4 +20,4 @@ types = { path = "../../consensus/types"}
kzg = { path = "../../crypto/kzg" }
ethereum_ssz = "0.5.0"
eth2_config = { path = "../eth2_config"}
discv5 = "0.2.2"
discv5 = "0.3.0"
@ -174,21 +174,6 @@ impl<T> From<proto_array::Error> for Error<T> {
}
}

/// Indicates whether the unrealized justification of a block should be calculated and tracked.
/// If a block has been finalized, this can be set to false. This is useful when syncing finalized
/// portions of the chain. Otherwise this should always be set to true.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CountUnrealized {
True,
False,
}

impl CountUnrealized {
pub fn is_true(&self) -> bool {
matches!(self, CountUnrealized::True)
}
}

/// Indicates if a block has been verified by an execution payload.
///
/// There is no variant for "invalid", since such a block should never be added to fork choice.
@ -661,8 +646,14 @@ where
state: &BeaconState<E>,
payload_verification_status: PayloadVerificationStatus,
spec: &ChainSpec,
count_unrealized: CountUnrealized,
) -> Result<(), Error<T::Error>> {
// If this block has already been processed we do not need to reprocess it.
// We check this immediately in case re-processing the block mutates some property of the
// global fork choice store, e.g. the justified checkpoints or the proposer boost root.
if self.proto_array.contains_block(&block_root) {
return Ok(());
}

// Provide the slot (as per the system clock) to the `fc_store` and then return its view of
// the current slot. The `fc_store` will ensure that the `current_slot` is never
// decreasing, a property which we must maintain.
@ -728,9 +719,6 @@ where
)?;

// Update unrealized justified/finalized checkpoints.
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized
.is_true()
{
let block_epoch = block.slot().epoch(E::slots_per_epoch());

// If the parent checkpoints are already at the same epoch as the block being imported,
@ -748,8 +736,7 @@ where
.unrealized_justified_checkpoint
.zip(parent_block.unrealized_finalized_checkpoint)
.filter(|(parent_justified, parent_finalized)| {
parent_justified.epoch == block_epoch
&& parent_finalized.epoch + 1 >= block_epoch
parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
});

let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
@ -814,14 +801,6 @@ where
)?;
}

(
Some(unrealized_justified_checkpoint),
Some(unrealized_finalized_checkpoint),
)
} else {
(None, None)
};

let target_slot = block
.slot()
.epoch(E::slots_per_epoch())
@ -891,8 +870,8 @@ where
justified_checkpoint: state.current_justified_checkpoint(),
finalized_checkpoint: state.finalized_checkpoint(),
execution_status,
unrealized_justified_checkpoint,
unrealized_finalized_checkpoint,
unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint),
unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint),
},
current_slot,
)?;

@ -2,9 +2,9 @@ mod fork_choice;
mod fork_choice_store;

pub use crate::fork_choice::{
AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView,
ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses,
AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters,
InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice,
QueuedAttestation, ResetPayloadStatuses,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};

@ -12,8 +12,7 @@ use beacon_chain::{
StateSkipConfig, WhenSlotSkipped,
};
use fork_choice::{
CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
QueuedAttestation,
ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation,
};
use store::MemoryStore;
use types::{
@ -288,7 +287,6 @@ impl ForkChoiceTest {
&state,
PayloadVerificationStatus::Verified,
&self.harness.chain.spec,
CountUnrealized::True,
)
.unwrap();
self
@ -331,7 +329,6 @@ impl ForkChoiceTest {
&state,
PayloadVerificationStatus::Verified,
&self.harness.chain.spec,
CountUnrealized::True,
)
.err()
.expect("on_block did not return an error");

@ -30,8 +30,10 @@ impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
pub struct DepositTreeSnapshot {
pub finalized: Vec<Hash256>,
pub deposit_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub deposit_count: u64,
pub execution_block_hash: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub execution_block_height: u64,
}

@ -39,7 +39,7 @@ impl CommandLineTest {
}

fn run_with_ip(&mut self) -> CompletedTest<BootNodeConfigSerialization> {
self.cmd.arg(IP_ADDRESS);
self.cmd.arg("--enr-address").arg(IP_ADDRESS);
self.run()
}
}
@ -67,7 +67,13 @@ fn port_flag() {
.flag("port", Some(port.to_string().as_str()))
.run_with_ip()
.with_config(|config| {
assert_eq!(config.listen_socket.port(), port);
assert_eq!(
config
.ipv4_listen_socket
.expect("Bootnode should be listening on IPv4")
.port(),
port
);
})
}

@ -78,7 +84,13 @@ fn listen_address_flag() {
.flag("listen-address", Some("127.0.0.2"))
.run_with_ip()
.with_config(|config| {
assert_eq!(config.listen_socket.ip(), addr);
assert_eq!(
config
.ipv4_listen_socket
.expect("Bootnode should be listening on IPv4")
.ip(),
&addr
);
});
}

@ -499,3 +499,24 @@ fn latency_measurement_service() {
assert!(!config.enable_latency_measurement_service);
});
}

#[test]
fn validator_registration_batch_size() {
CommandLineTest::new().run().with_config(|config| {
assert_eq!(config.validator_registration_batch_size, 500);
});
CommandLineTest::new()
.flag("validator-registration-batch-size", Some("100"))
.run()
.with_config(|config| {
assert_eq!(config.validator_registration_batch_size, 100);
});
}

#[test]
#[should_panic]
fn validator_registration_batch_size_zero_value() {
CommandLineTest::new()
.flag("validator-registration-batch-size", Some("0"))
.run();
}
|
@ -7,7 +7,7 @@ use beacon_chain::{
|
||||
obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
|
||||
},
|
||||
test_utils::{BeaconChainHarness, EphemeralHarnessType},
|
||||
BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer,
|
||||
BeaconChainTypes, CachedHead, NotifyExecutionLayer,
|
||||
};
|
||||
use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1};
|
||||
use serde::Deserialize;
|
||||
@ -381,7 +381,6 @@ impl<E: EthSpec> Tester<E> {
|
||||
let result = self.block_on_dangerous(self.harness.chain.process_block(
|
||||
block_root,
|
||||
block.clone(),
|
||||
CountUnrealized::True,
|
||||
NotifyExecutionLayer::Yes,
|
||||
))?;
|
||||
if result.is_ok() != valid {
|
||||
@ -441,7 +440,6 @@ impl<E: EthSpec> Tester<E> {
|
||||
&state,
|
||||
PayloadVerificationStatus::Irrelevant,
|
||||
&self.harness.chain.spec,
|
||||
CountUnrealized::True,
|
||||
);
|
||||
|
||||
if result.is_ok() {
|
||||
|
@ -333,6 +333,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.default_value("true")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("validator-registration-batch-size")
|
||||
.long("validator-registration-batch-size")
|
||||
.value_name("INTEGER")
|
||||
.help("Defines the number of validators per \
|
||||
validator/register_validator request sent to the BN. This value \
|
||||
can be reduced to avoid timeouts from builders.")
|
||||
.default_value("500")
|
||||
.takes_value(true),
|
||||
)
|
||||
/*
|
||||
* Experimental/development options.
|
||||
*/
|
||||
|
@ -77,6 +77,8 @@ pub struct Config {
|
||||
pub disable_run_on_all: bool,
|
||||
/// Enables a service which attempts to measure latency between the VC and BNs.
|
||||
pub enable_latency_measurement_service: bool,
|
||||
/// Defines the number of validators per `validator/register_validator` request sent to the BN.
|
||||
pub validator_registration_batch_size: usize,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@ -117,6 +119,7 @@ impl Default for Config {
|
||||
gas_limit: None,
|
||||
disable_run_on_all: false,
|
||||
enable_latency_measurement_service: true,
|
||||
validator_registration_batch_size: 500,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -380,6 +383,12 @@ impl Config {
|
||||
config.enable_latency_measurement_service =
|
||||
parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true);
|
||||
|
||||
config.validator_registration_batch_size =
|
||||
parse_required(cli_args, "validator-registration-batch-size")?;
|
||||
if config.validator_registration_batch_size == 0 {
|
||||
return Err("validator-registration-batch-size cannot be 0".to_string());
|
||||
}
|
||||
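
The new option can then be exercised on the validator client like so (a
sketch; the value 100 is purely illustrative):

```bash
# Reduce the per-request registration batch to avoid timeouts from builders.
lighthouse vc --validator-registration-batch-size 100
```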

/*
* Experimental
*/

@ -357,7 +357,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(warp::path("graffiti"))
.and(warp::path::end())
.and(validator_store_filter.clone())
.and(graffiti_file_filter)
.and(graffiti_file_filter.clone())
.and(graffiti_flag_filter)
.and(signer.clone())
.and(log_filter.clone())
@ -617,18 +617,27 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.and(warp::path::end())
.and(warp::body::json())
.and(validator_store_filter.clone())
.and(graffiti_file_filter)
.and(signer.clone())
.and(task_executor_filter.clone())
.and_then(
|validator_pubkey: PublicKey,
body: api_types::ValidatorPatchRequest,
validator_store: Arc<ValidatorStore<T, E>>,
graffiti_file: Option<GraffitiFile>,
signer,
task_executor: TaskExecutor| {
blocking_signed_json_task(signer, move || {
if body.graffiti.is_some() && graffiti_file.is_some() {
return Err(warp_utils::reject::custom_bad_request(
"Unable to update graffiti as the \"--graffiti-file\" flag is set"
.to_string(),
));
}

let maybe_graffiti = body.graffiti.clone().map(Into::into);
let initialized_validators_rw_lock = validator_store.initialized_validators();
let mut initialized_validators = initialized_validators_rw_lock.write();

match (
initialized_validators.is_enabled(&validator_pubkey),
initialized_validators.validator(&validator_pubkey.compress()),
@ -641,7 +650,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
if Some(is_enabled) == body.enabled
&& initialized_validator.get_gas_limit() == body.gas_limit
&& initialized_validator.get_builder_proposals()
== body.builder_proposals =>
== body.builder_proposals
&& initialized_validator.get_graffiti() == maybe_graffiti =>
{
Ok(())
}
@ -654,6 +664,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
body.enabled,
body.gas_limit,
body.builder_proposals,
body.graffiti,
),
)
.map_err(|e| {

@ -28,12 +28,14 @@ use slot_clock::{SlotClock, TestingSlotClock};
use std::future::Future;
use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use task_executor::TaskExecutor;
use tempfile::{tempdir, TempDir};
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
use types::graffiti::GraffitiString;

const PASSWORD_BYTES: &[u8] = &[42, 50, 37];
pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42);
@ -533,7 +535,7 @@ impl ApiTester {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];

self.client
.patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None)
.patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None)
.await
.unwrap();

@ -575,7 +577,13 @@ impl ApiTester {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];

self.client
.patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None)
.patch_lighthouse_validators(
&validator.voting_pubkey,
None,
Some(gas_limit),
None,
None,
)
.await
.unwrap();

@ -602,6 +610,7 @@ impl ApiTester {
None,
None,
Some(builder_proposals),
None,
)
.await
.unwrap();
@ -620,6 +629,34 @@ impl ApiTester {

self
}

pub async fn set_graffiti(self, index: usize, graffiti: &str) -> Self {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];
let graffiti_str = GraffitiString::from_str(graffiti).unwrap();
self.client
.patch_lighthouse_validators(
&validator.voting_pubkey,
None,
None,
None,
Some(graffiti_str),
)
.await
.unwrap();

self
}

pub async fn assert_graffiti(self, index: usize, graffiti: &str) -> Self {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];
let graffiti_str = GraffitiString::from_str(graffiti).unwrap();
assert_eq!(
self.validator_store.graffiti(&validator.voting_pubkey),
Some(graffiti_str.into())
);

self
}
}

struct HdValidatorScenario {
@ -723,7 +760,13 @@ fn routes_with_invalid_auth() {
.await
.test_with_invalid_auth(|client| async move {
client
.patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None)
.patch_lighthouse_validators(
&PublicKeyBytes::empty(),
Some(false),
None,
None,
None,
)
.await
})
.await
@ -931,6 +974,41 @@ fn validator_builder_proposals() {
});
}

#[test]
fn validator_graffiti() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.set_graffiti(0, "Mr F was here")
.await
.assert_graffiti(0, "Mr F was here")
.await
// Test setting graffiti while the validator is disabled
.set_validator_enabled(0, false)
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2)
.set_graffiti(0, "Mr F was here again")
.await
.set_validator_enabled(0, true)
.await
.assert_enabled_validators_count(2)
.assert_graffiti(0, "Mr F was here again")
.await
});
}

#[test]
fn keystore_validator_creation() {
let runtime = build_runtime();

@ -468,7 +468,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() {
for pubkey in &pubkeys {
tester
.client
.patch_lighthouse_validators(pubkey, Some(false), None, None)
.patch_lighthouse_validators(pubkey, Some(false), None, None, None)
.await
.unwrap();
}

@ -27,6 +27,7 @@ use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use types::graffiti::GraffitiString;
use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes};
use url::{ParseError, Url};
use validator_dir::Builder as ValidatorDirBuilder;
@ -147,6 +148,10 @@ impl InitializedValidator {
pub fn get_index(&self) -> Option<u64> {
self.index
}

pub fn get_graffiti(&self) -> Option<Graffiti> {
self.graffiti
}
}

fn open_keystore(path: &Path) -> Result<Keystore, Error> {
@ -671,8 +676,8 @@ impl InitializedValidators {
self.validators.get(public_key)
}

/// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals`
/// values.
/// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`,
/// `builder_proposals`, and `graffiti` values.
///
/// ## Notes
///
@ -682,7 +687,7 @@ impl InitializedValidators {
///
/// If a `gas_limit` is included in the call to this function, it will also be updated and saved
/// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition`
/// or `InitializedValidator`. The same logic applies to `builder_proposals`.
/// or `InitializedValidator`. The same logic applies to `builder_proposals` and `graffiti`.
///
/// Saves the `ValidatorDefinitions` to file, even if no definitions were changed.
pub async fn set_validator_definition_fields(
@ -691,6 +696,7 @@ impl InitializedValidators {
enabled: Option<bool>,
gas_limit: Option<u64>,
builder_proposals: Option<bool>,
graffiti: Option<GraffitiString>,
) -> Result<(), Error> {
if let Some(def) = self
.definitions
@ -708,6 +714,9 @@ impl InitializedValidators {
if let Some(builder_proposals) = builder_proposals {
def.builder_proposals = Some(builder_proposals);
}
if let Some(graffiti) = graffiti.clone() {
def.graffiti = Some(graffiti);
}
}

self.update_validators().await?;
@ -723,6 +732,9 @@ impl InitializedValidators {
if let Some(builder_proposals) = builder_proposals {
val.builder_proposals = Some(builder_proposals);
}
if let Some(graffiti) = graffiti {
val.graffiti = Some(graffiti.into());
}
}

self.definitions

@ -487,6 +487,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
.beacon_nodes(beacon_nodes.clone())
.runtime_context(context.service_context("preparation".into()))
.builder_registration_timestamp_override(config.builder_registration_timestamp_override)
.validator_registration_batch_size(config.validator_registration_batch_size)
.build()?;

let sync_committee_service = SyncCommitteeService::new(

@ -23,9 +23,6 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2;
/// Number of epochs to wait before re-submitting validator registration.
const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1;

/// The number of validator registrations to include per request to the beacon node.
const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500;

/// Builds an `PreparationService`.
pub struct PreparationServiceBuilder<T: SlotClock + 'static, E: EthSpec> {
validator_store: Option<Arc<ValidatorStore<T, E>>>,
@ -33,6 +30,7 @@ pub struct PreparationServiceBuilder<T: SlotClock + 'static, E: EthSpec> {
beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>,
context: Option<RuntimeContext<E>>,
builder_registration_timestamp_override: Option<u64>,
validator_registration_batch_size: Option<usize>,
}

impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> {
@ -43,6 +41,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> {
beacon_nodes: None,
context: None,
builder_registration_timestamp_override: None,
validator_registration_batch_size: None,
}
}

@ -74,6 +73,14 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> {
self
}

pub fn validator_registration_batch_size(
mut self,
validator_registration_batch_size: usize,
) -> Self {
self.validator_registration_batch_size = Some(validator_registration_batch_size);
self
}

pub fn build(self) -> Result<PreparationService<T, E>, String> {
Ok(PreparationService {
inner: Arc::new(Inner {
@ -91,6 +98,9 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> {
.ok_or("Cannot build PreparationService without runtime_context")?,
builder_registration_timestamp_override: self
.builder_registration_timestamp_override,
validator_registration_batch_size: self.validator_registration_batch_size.ok_or(
"Cannot build PreparationService without validator_registration_batch_size",
)?,
validator_registration_cache: RwLock::new(HashMap::new()),
}),
})
@ -107,6 +117,7 @@ pub struct Inner<T, E: EthSpec> {
// Used to track unpublished validator registration changes.
validator_registration_cache:
RwLock<HashMap<ValidatorRegistrationKey, SignedValidatorRegistrationData>>,
validator_registration_batch_size: usize,
}

#[derive(Hash, Eq, PartialEq, Debug, Clone)]
@ -447,7 +458,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> {
}

if !signed.is_empty() {
for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) {
for batch in signed.chunks(self.validator_registration_batch_size) {
match self
.beacon_nodes
.first_success(
@ -462,7 +473,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> {
Ok(()) => info!(
log,
"Published validator registrations to the builder network";
"count" => registration_data_len,
"count" => batch.len(),
),
Err(e) => warn!(
log,