Merge pull request #4432 from jimmygchen/deneb-merge-from-unstable-20230627

Deneb merge from unstable 20230627

Commit 4c9fcf1e83

.github/workflows/release.yml (vendored): 10 changed lines
@@ -134,17 +134,11 @@ jobs:
     - name: Build Lighthouse for Windows portable
       if: matrix.arch == 'x86_64-windows-portable'
-      # NOTE: profile set to release until this rustc issue is fixed:
-      #
-      # https://github.com/rust-lang/rust/issues/107781
-      #
-      # tracked at: https://github.com/sigp/lighthouse/issues/3964
-      run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile release
+      run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }}

     - name: Build Lighthouse for Windows modern
       if: matrix.arch == 'x86_64-windows'
-      # NOTE: profile set to release (see above)
-      run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile release
+      run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }}

     - name: Configure GPG and create artifacts
       if: startsWith(matrix.arch, 'x86_64-windows') != true
Cargo.lock (generated): 321 changed lines
@@ -566,6 +566,12 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce"

+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
 [[package]]
 name = "base64"
 version = "0.13.1"
@@ -659,6 +665,7 @@ dependencies = [
 "tokio",
 "tokio-stream",
 "tree_hash",
+"tree_hash_derive",
 "types",
 "unused_port",
 ]
@@ -1416,6 +1423,18 @@ dependencies = [
 "zeroize",
 ]

+[[package]]
+name = "crypto-bigint"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15"
+dependencies = [
+"generic-array",
+"rand_core 0.6.4",
+"subtle",
+"zeroize",
+]
+
 [[package]]
 name = "crypto-common"
 version = "0.1.6"
@@ -1511,11 +1530,12 @@ dependencies = [

 [[package]]
 name = "curve25519-dalek"
-version = "4.0.0-rc.1"
+version = "4.0.0-rc.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16"
+checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585"
 dependencies = [
 "cfg-if",
+"digest 0.10.7",
 "fiat-crypto",
 "packed_simd_2",
 "platforms 3.0.2",
@@ -1698,6 +1718,16 @@ dependencies = [
 "zeroize",
 ]

+[[package]]
+name = "der"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17"
+dependencies = [
+"const-oid",
+"zeroize",
+]
+
 [[package]]
 name = "der-parser"
 version = "7.0.0"
@@ -1844,6 +1874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
 dependencies = [
 "block-buffer 0.10.4",
+"const-oid",
 "crypto-common",
 "subtle",
 ]
@@ -1900,15 +1931,15 @@ dependencies = [

 [[package]]
 name = "discv5"
-version = "0.2.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b009a99b85b58900df46435307fc5c4c845af7e182582b1fbf869572fa9fce69"
+checksum = "77f32d27968ba86689e3f0eccba0383414348a6fc5918b0a639c98dd81e20ed6"
 dependencies = [
 "aes 0.7.5",
 "aes-gcm 0.9.4",
 "arrayvec",
 "delay_map",
-"enr 0.7.0",
+"enr 0.8.1",
 "fnv",
 "futures",
 "hashlink 0.7.0",
@@ -1924,8 +1955,6 @@ dependencies = [
 "smallvec",
 "socket2 0.4.9",
 "tokio",
-"tokio-stream",
-"tokio-util 0.6.10",
 "tracing",
 "tracing-subscriber",
 "uint",
@@ -1961,10 +1990,24 @@ version = "0.14.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c"
 dependencies = [
-"der",
-"elliptic-curve",
-"rfc6979",
-"signature",
+"der 0.6.1",
+"elliptic-curve 0.12.3",
+"rfc6979 0.3.1",
+"signature 1.6.4",
+]
+
+[[package]]
+name = "ecdsa"
+version = "0.16.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428"
+dependencies = [
+"der 0.7.6",
+"digest 0.10.7",
+"elliptic-curve 0.13.5",
+"rfc6979 0.4.0",
+"signature 2.1.0",
+"spki 0.7.2",
 ]

 [[package]]
@@ -1973,7 +2016,17 @@ version = "1.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7"
 dependencies = [
-"signature",
+"signature 1.6.4",
+]
+
+[[package]]
+name = "ed25519"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963"
+dependencies = [
+"pkcs8 0.10.2",
+"signature 2.1.0",
 ]

 [[package]]
@@ -1983,13 +2036,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
 dependencies = [
 "curve25519-dalek 3.2.0",
-"ed25519",
+"ed25519 1.5.3",
 "rand 0.7.3",
 "serde",
 "sha2 0.9.9",
 "zeroize",
 ]

+[[package]]
+name = "ed25519-dalek"
+version = "2.0.0-rc.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "798f704d128510932661a3489b08e3f4c934a01d61c5def59ae7b8e48f19665a"
+dependencies = [
+"curve25519-dalek 4.0.0-rc.2",
+"ed25519 2.2.1",
+"rand_core 0.6.4",
+"serde",
+"sha2 0.10.6",
+"zeroize",
+]
+
 [[package]]
 name = "ef_tests"
 version = "0.2.0"
@@ -2037,18 +2104,37 @@ version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3"
 dependencies = [
-"base16ct",
-"crypto-bigint",
-"der",
+"base16ct 0.1.1",
+"crypto-bigint 0.4.9",
+"der 0.6.1",
 "digest 0.10.7",
-"ff",
+"ff 0.12.1",
 "generic-array",
-"group",
+"group 0.12.1",
 "hkdf",
 "pem-rfc7468",
-"pkcs8",
+"pkcs8 0.9.0",
 "rand_core 0.6.4",
-"sec1",
+"sec1 0.3.0",
+"subtle",
+"zeroize",
+]
+
+[[package]]
+name = "elliptic-curve"
+version = "0.13.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b"
+dependencies = [
+"base16ct 0.2.0",
+"crypto-bigint 0.5.2",
+"digest 0.10.7",
+"ff 0.13.0",
+"generic-array",
+"group 0.13.0",
+"pkcs8 0.10.2",
+"rand_core 0.6.4",
+"sec1 0.7.2",
 "subtle",
 "zeroize",
 ]
@@ -2072,7 +2158,7 @@ dependencies = [
 "bs58",
 "bytes",
 "hex",
-"k256",
+"k256 0.11.6",
 "log",
 "rand 0.8.5",
 "rlp",
@@ -2083,16 +2169,15 @@ dependencies = [

 [[package]]
 name = "enr"
-version = "0.7.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad"
+checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116"
 dependencies = [
 "base64 0.13.1",
-"bs58",
 "bytes",
-"ed25519-dalek",
+"ed25519-dalek 2.0.0-rc.2",
 "hex",
-"k256",
+"k256 0.13.1",
 "log",
 "rand 0.8.5",
 "rlp",
@@ -2258,7 +2343,9 @@ dependencies = [
 "futures-util",
 "libsecp256k1",
 "lighthouse_network",
+"mediatype",
 "mime",
+"pretty_reqwest_error",
 "procinfo",
 "proto_array",
 "psutil",
@@ -2269,6 +2356,7 @@ dependencies = [
 "serde_json",
 "slashing_protection",
 "store",
+"tokio",
 "types",
 ]

@@ -2598,11 +2686,11 @@ dependencies = [
 "bytes",
 "cargo_metadata",
 "chrono",
-"elliptic-curve",
+"elliptic-curve 0.12.3",
 "ethabi 18.0.0",
 "generic-array",
 "hex",
-"k256",
+"k256 0.11.6",
 "once_cell",
 "open-fastrlp",
 "rand 0.8.5",
@@ -2711,6 +2799,7 @@ dependencies = [
 "lru 0.7.8",
 "mev-rs",
 "parking_lot 0.12.1",
+"pretty_reqwest_error",
 "rand 0.8.5",
 "reqwest",
 "sensitive_url",
@@ -2785,6 +2874,16 @@ dependencies = [
 "subtle",
 ]

+[[package]]
+name = "ff"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449"
+dependencies = [
+"rand_core 0.6.4",
+"subtle",
+]
+
 [[package]]
 name = "ffi-opaque"
 version = "2.0.1"
@@ -3063,6 +3162,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
 dependencies = [
 "typenum",
 "version_check",
+"zeroize",
 ]

 [[package]]
@@ -3172,7 +3272,18 @@ version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
 dependencies = [
-"ff",
+"ff 0.12.1",
+"rand_core 0.6.4",
+"subtle",
+]
+
+[[package]]
+name = "group"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
+dependencies = [
+"ff 0.13.0",
 "rand_core 0.6.4",
 "subtle",
 ]
@@ -3909,12 +4020,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
 dependencies = [
 "cfg-if",
-"ecdsa",
-"elliptic-curve",
+"ecdsa 0.14.8",
+"elliptic-curve 0.12.3",
 "sha2 0.10.6",
 "sha3 0.10.8",
 ]

+[[package]]
+name = "k256"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc"
+dependencies = [
+"cfg-if",
+"ecdsa 0.16.7",
+"elliptic-curve 0.13.5",
+"once_cell",
+"sha2 0.10.6",
+"signature 2.1.0",
+]
+
 [[package]]
 name = "keccak"
 version = "0.1.4"
@@ -4131,7 +4256,7 @@ checksum = "b1fff5bd889c82a0aec668f2045edd066f559d4e5c40354e5a4c77ac00caac38"
 dependencies = [
 "asn1_der",
 "bs58",
-"ed25519-dalek",
+"ed25519-dalek 1.0.1",
 "either",
 "fnv",
 "futures",
@@ -4166,7 +4291,7 @@ checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f"
 dependencies = [
 "asn1_der",
 "bs58",
-"ed25519-dalek",
+"ed25519-dalek 1.0.1",
 "either",
 "fnv",
 "futures",
@@ -4185,7 +4310,7 @@ dependencies = [
 "prost-build",
 "rand 0.8.5",
 "rw-stream-sink",
-"sec1",
+"sec1 0.3.0",
 "sha2 0.10.6",
 "smallvec",
 "thiserror",
@@ -4294,7 +4419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1"
 dependencies = [
 "bs58",
-"ed25519-dalek",
+"ed25519-dalek 1.0.1",
 "log",
 "multiaddr 0.17.1",
 "multihash 0.17.0",
@@ -4914,6 +5039,12 @@ dependencies = [
 "libc",
 ]

+[[package]]
+name = "mediatype"
+version = "0.19.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea6e62614ab2fc0faa58bb15102a0382d368f896a9fa4776592589ab55c4de7"
+
 [[package]]
 name = "memchr"
 version = "2.5.0"
@@ -5638,9 +5769,9 @@ dependencies = [

 [[package]]
 name = "openssl"
-version = "0.10.52"
+version = "0.10.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56"
+checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
 dependencies = [
 "bitflags",
 "cfg-if",
@@ -5679,9 +5810,9 @@ dependencies = [

 [[package]]
 name = "openssl-sys"
-version = "0.9.87"
+version = "0.9.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e"
+checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
 dependencies = [
 "cc",
 "libc",
@@ -5726,8 +5857,8 @@ version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594"
 dependencies = [
-"ecdsa",
-"elliptic-curve",
+"ecdsa 0.14.8",
+"elliptic-curve 0.12.3",
 "sha2 0.10.6",
 ]

@@ -5737,8 +5868,8 @@ version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa"
 dependencies = [
-"ecdsa",
-"elliptic-curve",
+"ecdsa 0.14.8",
+"elliptic-curve 0.12.3",
 "sha2 0.10.6",
 ]

@@ -5994,8 +6125,18 @@ version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
 dependencies = [
-"der",
-"spki",
+"der 0.6.1",
+"spki 0.6.0",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+"der 0.7.6",
+"spki 0.7.2",
 ]

 [[package]]
@@ -6139,6 +6280,14 @@ dependencies = [
 "vcpkg",
 ]

+[[package]]
+name = "pretty_reqwest_error"
+version = "0.1.0"
+dependencies = [
+"reqwest",
+"sensitive_url",
+]
+
 [[package]]
 name = "prettyplease"
 version = "0.1.25"
@@ -6738,11 +6887,21 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb"
 dependencies = [
-"crypto-bigint",
+"crypto-bigint 0.4.9",
 "hmac 0.12.1",
 "zeroize",
 ]

+[[package]]
+name = "rfc6979"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
+dependencies = [
+"hmac 0.12.1",
+"subtle",
+]
+
 [[package]]
 name = "ring"
 version = "0.16.20"
@@ -7119,10 +7278,24 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928"
 dependencies = [
-"base16ct",
-"der",
+"base16ct 0.1.1",
+"der 0.6.1",
 "generic-array",
-"pkcs8",
+"pkcs8 0.9.0",
+"subtle",
+"zeroize",
+]
+
+[[package]]
+name = "sec1"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0aec48e813d6b90b15f0b8948af3c63483992dee44c03e9930b3eebdabe046e"
+dependencies = [
+"base16ct 0.2.0",
+"der 0.7.6",
+"generic-array",
+"pkcs8 0.10.2",
 "subtle",
 "zeroize",
 ]
@@ -7411,6 +7584,16 @@ dependencies = [
 "rand_core 0.6.4",
 ]

+[[package]]
+name = "signature"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500"
+dependencies = [
+"digest 0.10.7",
+"rand_core 0.6.4",
+]
+
 [[package]]
 name = "simple_asn1"
 version = "0.6.2"
@@ -7651,14 +7834,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"

 [[package]]
 name = "snow"
-version = "0.9.2"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733"
+checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d"
 dependencies = [
 "aes-gcm 0.9.4",
 "blake2",
 "chacha20poly1305",
-"curve25519-dalek 4.0.0-rc.1",
+"curve25519-dalek 4.0.0-rc.2",
 "rand_core 0.6.4",
 "ring",
 "rustc_version 0.4.0",
@@ -7715,7 +7898,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b"
 dependencies = [
 "base64ct",
-"der",
+"der 0.6.1",
+]
+
+[[package]]
+name = "spki"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+dependencies = [
+"base64ct",
+"der 0.7.6",
 ]

 [[package]]
@@ -9432,9 +9625,9 @@ dependencies = [

 [[package]]
 name = "webrtc-dtls"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05"
+checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267"
 dependencies = [
 "aes 0.6.0",
 "aes-gcm 0.10.2",
@@ -9445,29 +9638,28 @@ dependencies = [
 "ccm",
 "curve25519-dalek 3.2.0",
 "der-parser 8.2.0",
-"elliptic-curve",
+"elliptic-curve 0.12.3",
 "hkdf",
 "hmac 0.12.1",
 "log",
-"oid-registry 0.6.1",
 "p256",
 "p384",
 "rand 0.8.5",
 "rand_core 0.6.4",
-"rcgen 0.9.3",
+"rcgen 0.10.0",
 "ring",
 "rustls 0.19.1",
-"sec1",
+"sec1 0.3.0",
 "serde",
 "sha1",
 "sha2 0.10.6",
-"signature",
+"signature 1.6.4",
 "subtle",
 "thiserror",
 "tokio",
 "webpki 0.21.4",
 "webrtc-util",
-"x25519-dalek 2.0.0-pre.1",
+"x25519-dalek 2.0.0-rc.2",
 "x509-parser 0.13.2",
 ]

@@ -9910,12 +10102,13 @@ dependencies = [

 [[package]]
 name = "x25519-dalek"
-version = "2.0.0-pre.1"
+version = "2.0.0-rc.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df"
+checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95"
 dependencies = [
-"curve25519-dalek 3.2.0",
+"curve25519-dalek 4.0.0-rc.2",
 "rand_core 0.6.4",
+"serde",
 "zeroize",
 ]
@@ -35,6 +35,7 @@ members = [
 "common/lru_cache",
 "common/malloc_utils",
 "common/oneshot_broadcast",
+"common/pretty_reqwest_error",
 "common/sensitive_url",
 "common/slot_clock",
 "common/system_health",
Makefile: 6 changed lines
@@ -177,7 +177,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --workspace --tests -- \
+	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \
 	-D clippy::fn_to_numeric_cast_any \
 	-D warnings \
 	-A clippy::uninlined-format-args \
@@ -188,6 +188,10 @@ lint:
 	-A clippy::question-mark \
 	-A clippy::uninlined-format-args

+# Lints the code using Clippy and automatically fix some simple compiler warnings.
+lint-fix:
+	EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint
+
 nightly-lint:
 	cp .github/custom/clippy.toml .
 	cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \
@@ -35,9 +35,10 @@ sloggers = { version = "2.1.1", features = ["json"] }
 slot_clock = { path = "../../common/slot_clock" }
 ethereum_hashing = "1.0.0-beta.2"
 ethereum_ssz = "0.5.0"
-ssz_types = "0.5.0"
+ssz_types = "0.5.3"
 ethereum_ssz_derive = "0.5.0"
 state_processing = { path = "../../consensus/state_processing" }
+tree_hash_derive = "0.5.0"
 tree_hash = "0.5.0"
 types = { path = "../../consensus/types" }
 tokio = "1.14.0"
@@ -117,14 +117,14 @@ pub enum Error {
     ///
     /// The peer has sent an invalid message.
     AggregatorPubkeyUnknown(u64),
-    /// The attestation has been seen before; either in a block, on the gossip network or from a
-    /// local validator.
+    /// The attestation or a superset of this attestation's aggregation bits for the same data
+    /// has been seen before; either in a block, on the gossip network or from a local validator.
     ///
     /// ## Peer scoring
     ///
     /// It's unclear if this attestation is valid, however we have already observed it and do not
     /// need to observe it again.
-    AttestationAlreadyKnown(Hash256),
+    AttestationSupersetKnown(Hash256),
     /// There has already been an aggregation observed for this validator, we refuse to process a
     /// second.
    ///
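The renamed variant reflects a behavior change: gossip aggregates are no longer deduplicated by exact equality, but rejected whenever an already-seen aggregate for the same AttestationData covers at least the same aggregation bits. A minimal sketch of that subset test, using Vec<bool> in place of Lighthouse's SSZ BitList and a hypothetical cache type (the real check is the observed_attestations.is_known_subset(...) call in the hunks below):

    use std::collections::HashMap;

    type Hash256 = [u8; 32];

    /// Hypothetical stand-in for the observed-aggregates cache.
    #[derive(Default)]
    struct ObservedAggregates {
        /// Aggregation bitfields already seen, bucketed by AttestationData root.
        seen: HashMap<Hash256, Vec<Vec<bool>>>,
    }

    impl ObservedAggregates {
        /// True if `bits` is a (non-strict) subset of a single previously
        /// observed aggregate for the same data, i.e. the new aggregate adds
        /// no signatures we lack (equal-length bitfields assumed).
        fn is_known_subset(&self, data_root: &Hash256, bits: &[bool]) -> bool {
            self.seen.get(data_root).map_or(false, |known| {
                known.iter().any(|k| {
                    bits.iter().zip(k.iter()).all(|(bit, seen_bit)| !*bit || *seen_bit)
                })
            })
        }

        /// Record a newly observed aggregate's bitfield.
        fn observe(&mut self, data_root: Hash256, bits: Vec<bool>) {
            self.seen.entry(data_root).or_default().push(bits);
        }
    }

The AGGREGATED_ATTESTATION_SUBSETS counter registered in metrics.rs at the end of this diff counts exactly these early exits.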
@@ -268,7 +268,7 @@ enum CheckAttestationSignature {
 struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> {
     signed_aggregate: &'a SignedAggregateAndProof<T::EthSpec>,
     indexed_attestation: IndexedAttestation<T::EthSpec>,
-    attestation_root: Hash256,
+    attestation_data_root: Hash256,
 }

 /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can
@@ -467,14 +467,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         }

         // Ensure the valid aggregated attestation has not already been seen locally.
-        let attestation_root = attestation.tree_hash_root();
+        let attestation_data = &attestation.data;
+        let attestation_data_root = attestation_data.tree_hash_root();
+
         if chain
             .observed_attestations
             .write()
-            .is_known(attestation, attestation_root)
+            .is_known_subset(attestation, attestation_data_root)
             .map_err(|e| Error::BeaconChainError(e.into()))?
         {
-            return Err(Error::AttestationAlreadyKnown(attestation_root));
+            metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
+            return Err(Error::AttestationSupersetKnown(attestation_data_root));
         }

         let aggregator_index = signed_aggregate.message.aggregator_index;
@@ -520,7 +523,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         if attestation.aggregation_bits.is_zero() {
             Err(Error::EmptyAggregationBitfield)
         } else {
-            Ok(attestation_root)
+            Ok(attestation_data_root)
         }
     }
@@ -533,7 +536,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {

         let attestation = &signed_aggregate.message.aggregate;
         let aggregator_index = signed_aggregate.message.aggregator_index;
-        let attestation_root = match Self::verify_early_checks(signed_aggregate, chain) {
+        let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) {
             Ok(root) => root,
             Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
         };
@@ -568,7 +571,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         Ok(IndexedAggregatedAttestation {
             signed_aggregate,
             indexed_attestation,
-            attestation_root,
+            attestation_data_root,
         })
     }
 }
@@ -577,7 +580,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
     /// Run the checks that happen after the indexed attestation and signature have been checked.
     fn verify_late_checks(
         signed_aggregate: &SignedAggregateAndProof<T::EthSpec>,
-        attestation_root: Hash256,
+        attestation_data_root: Hash256,
         chain: &BeaconChain<T>,
     ) -> Result<(), Error> {
         let attestation = &signed_aggregate.message.aggregate;
@@ -587,13 +590,14 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
         //
         // It's important to double check that the attestation is not already known, otherwise two
         // attestations processed at the same time could be published.
-        if let ObserveOutcome::AlreadyKnown = chain
+        if let ObserveOutcome::Subset = chain
             .observed_attestations
             .write()
-            .observe_item(attestation, Some(attestation_root))
+            .observe_item(attestation, Some(attestation_data_root))
             .map_err(|e| Error::BeaconChainError(e.into()))?
         {
-            return Err(Error::AttestationAlreadyKnown(attestation_root));
+            metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
+            return Err(Error::AttestationSupersetKnown(attestation_data_root));
         }

         // Observe the aggregator so we don't process another aggregate from them.
@@ -653,7 +657,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
         let IndexedAggregatedAttestation {
             signed_aggregate,
             indexed_attestation,
-            attestation_root,
+            attestation_data_root,
         } = signed_aggregate;

         match check_signature {
@@ -677,7 +681,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
             CheckAttestationSignature::No => (),
         };

-        if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_root, chain) {
+        if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) {
             return Err(SignatureValid(indexed_attestation, e));
         }
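Note the companion rename from attestation_root to attestation_data_root throughout: two aggregates that differ only in aggregation_bits hash to different attestation roots but share the tree hash root of their AttestationData, so only the data root can serve as the bucket key under which the subset comparison above is possible.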
@@ -71,7 +71,6 @@ use execution_layer::{
     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
     PayloadAttributes, PayloadStatus,
 };
-pub use fork_choice::CountUnrealized;
 use fork_choice::{
     AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters,
     InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses,
@@ -2599,7 +2598,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_chain_segment(
         self: &Arc<Self>,
         chain_segment: Vec<BlockWrapper<T::EthSpec>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
         let mut imported_blocks = 0;
@@ -2666,7 +2664,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .process_block(
                     signature_verified_block.block_root(),
                     signature_verified_block,
-                    count_unrealized,
                     notify_execution_layer,
                 )
                 .await
@@ -2759,13 +2756,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub async fn process_blob(
         self: &Arc<Self>,
         blob: GossipVerifiedBlob<T::EthSpec>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
-        self.check_availability_and_maybe_import(
-            blob.slot(),
-            |chain| chain.data_availability_checker.put_gossip_blob(blob),
-            count_unrealized,
-        )
+        self.check_availability_and_maybe_import(blob.slot(), |chain| {
+            chain.data_availability_checker.put_gossip_blob(blob)
+        })
         .await
     }
@@ -2789,7 +2783,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         block_root: Hash256,
         unverified_block: B,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
@@ -2813,20 +2806,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .map_err(|e| self.handle_block_error(e))?;

         match executed_block {
-            ExecutedBlock::Available(block) => {
-                self.import_available_block(Box::new(block), count_unrealized)
-                    .await
-            }
+            ExecutedBlock::Available(block) => self.import_available_block(Box::new(block)).await,
             ExecutedBlock::AvailabilityPending(block) => {
-                self.check_availability_and_maybe_import(
-                    block.block.slot(),
-                    |chain| {
+                self.check_availability_and_maybe_import(block.block.slot(), |chain| {
                     chain
                         .data_availability_checker
                         .put_pending_executed_block(block)
-                    },
-                    count_unrealized,
-                )
+                })
                 .await
             }
         }
@@ -2836,7 +2822,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// get a fully `ExecutedBlock`
     ///
     /// An error is returned if the verification handle couldn't be awaited.
-    async fn into_executed_block(
+    pub async fn into_executed_block(
         self: Arc<Self>,
         execution_pending_block: ExecutionPendingBlock<T>,
     ) -> Result<ExecutedBlock<T::EthSpec>, BlockError<T::EthSpec>> {
@@ -2925,23 +2911,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         slot: Slot,
         cache_fn: impl FnOnce(Arc<Self>) -> Result<Availability<T::EthSpec>, AvailabilityCheckError>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         let availability = cache_fn(self.clone())?;
         match availability {
-            Availability::Available(block) => {
-                self.import_available_block(block, count_unrealized).await
-            }
+            Availability::Available(block) => self.import_available_block(block).await,
             Availability::MissingComponents(block_root) => Ok(
                 AvailabilityProcessingStatus::MissingComponents(slot, block_root),
             ),
         }
     }

-    async fn import_available_block(
+    pub async fn import_available_block(
         self: &Arc<Self>,
         block: Box<AvailableExecutedBlock<T::EthSpec>>,
-        count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         let AvailableExecutedBlock {
             block,
@@ -2971,7 +2953,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 state,
                 confirmed_state_roots,
                 payload_verification_outcome.payload_verification_status,
-                count_unrealized,
                 parent_block,
                 parent_eth1_finalization_data,
                 consensus_context,
@@ -3017,7 +2998,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         mut state: BeaconState<T::EthSpec>,
         confirmed_state_roots: Vec<Hash256>,
         payload_verification_status: PayloadVerificationStatus,
-        count_unrealized: CountUnrealized,
         parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
         parent_eth1_finalization_data: Eth1FinalizationData,
         mut consensus_context: ConsensusContext<T::EthSpec>,
@@ -3088,7 +3068,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 &state,
                 payload_verification_status,
                 &self.spec,
-                count_unrealized,
             )
             .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
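With CountUnrealized removed from fork choice, the count_unrealized argument disappears from every import path above, and check_availability_and_maybe_import shrinks to a slot plus a closure that feeds the availability cache. A minimal sketch of the resulting shape, with every type reduced to a stand-in (this is not Lighthouse's real API):

    // Stand-in types; the real ones are generic over BeaconChainTypes.
    enum Availability<B> {
        Available(B),
        MissingComponents([u8; 32]), // block root
    }

    enum ProcessingStatus {
        Imported([u8; 32]),
        MissingComponents(u64, [u8; 32]), // (slot, block root)
    }

    // The caller supplies only a slot and a cache closure; there is no
    // longer a count_unrealized flag to thread through to fork choice.
    fn check_availability_and_maybe_import<B>(
        slot: u64,
        cache_fn: impl FnOnce() -> Result<Availability<B>, String>,
        import_available_block: impl FnOnce(B) -> ProcessingStatus,
    ) -> Result<ProcessingStatus, String> {
        match cache_fn()? {
            Availability::Available(block) => Ok(import_available_block(block)),
            Availability::MissingComponents(root) => {
                Ok(ProcessingStatus::MissingComponents(slot, root))
            }
        }
    }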
@@ -149,8 +149,6 @@ pub enum BlockError<T: EthSpec> {
     /// It's unclear if this block is valid, but it cannot be processed without already knowing
     /// its parent.
     ParentUnknown(BlockWrapper<T>),
-    /// The block skips too many slots and is a DoS risk.
-    TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
     /// The block slot is greater than the present slot.
     ///
     /// ## Peer scoring
@@ -951,9 +949,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             parent_block.root
         };

-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent_block.slot, block.message())?;
-
         // We assign to a variable instead of using `if let Some` directly to ensure we drop the
         // write lock before trying to acquire it again in the `else` clause.
         let proposer_opt = chain
@@ -1110,9 +1105,6 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

         let (mut parent, block) = load_parent(block_root, block, chain)?;

-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         let state = cheap_state_advance_to_obtain_committees(
             &mut parent.pre_state,
             parent.beacon_state_root,
@@ -1327,9 +1319,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             return Err(BlockError::ParentUnknown(block.into_block_wrapper()));
         }

-        // Reject any block that exceeds our limit on skipped slots.
-        check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
-
         /*
          * Perform cursory checks to see if the block is even worth processing.
          */
@@ -1688,30 +1677,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     }
 }

-/// Check that the count of skip slots between the block and its parent does not exceed our maximum
-/// value.
-///
-/// Whilst this is not part of the specification, we include this to help prevent us from DoS
-/// attacks. In times of dire network circumstance, the user can configure the
-/// `import_max_skip_slots` value.
-fn check_block_skip_slots<T: BeaconChainTypes>(
-    chain: &BeaconChain<T>,
-    parent_slot: Slot,
-    block: BeaconBlockRef<'_, T::EthSpec>,
-) -> Result<(), BlockError<T::EthSpec>> {
-    // Reject any block that exceeds our limit on skipped slots.
-    if let Some(max_skip_slots) = chain.config.import_max_skip_slots {
-        if block.slot() > parent_slot + max_skip_slots {
-            return Err(BlockError::TooManySkippedSlots {
-                parent_slot,
-                block_slot: block.slot(),
-            });
-        }
-    }
-
-    Ok(())
-}
-
 /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any).
 fn check_block_against_anchor_slot<T: BeaconChainTypes>(
     block: BeaconBlockRef<'_, T::EthSpec>,
@@ -20,7 +20,7 @@ use crate::{
 };
 use eth1::Config as Eth1Config;
 use execution_layer::ExecutionLayer;
-use fork_choice::{CountUnrealized, ForkChoice, ResetPayloadStatuses};
+use fork_choice::{ForkChoice, ResetPayloadStatuses};
 use futures::channel::mpsc::Sender;
 use kzg::{Kzg, TrustedSetup};
 use operation_pool::{OperationPool, PersistedOperationPool};
@@ -701,7 +701,6 @@ where
             store.clone(),
             Some(current_slot),
             &self.spec,
-            CountUnrealized::True,
         )?;
     }
@@ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24;

 #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
 pub struct ChainConfig {
-    /// Maximum number of slots to skip when importing a consensus message (e.g., block,
-    /// attestation, etc).
+    /// Maximum number of slots to skip when importing an attestation.
     ///
     /// If `None`, there is no limit.
     pub import_max_skip_slots: Option<u64>,
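With the block-side check_block_skip_slots helper deleted above, import_max_skip_slots now only bounds attestation import, which is what the narrowed doc comment records. An illustrative sketch of that kind of bound (hypothetical helper, not the exact Lighthouse check):

    // None means no limit, mirroring import_max_skip_slots.
    fn exceeds_skip_limit(
        attestation_slot: u64,
        current_slot: u64,
        max_skip_slots: Option<u64>,
    ) -> bool {
        max_skip_slots.map_or(false, |max| {
            current_slot.saturating_sub(attestation_slot) > max
        })
    }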
@ -1,5 +1,5 @@
|
|||||||
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
||||||
use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
|
use fork_choice::{ForkChoice, PayloadVerificationStatus};
|
||||||
use itertools::process_results;
|
use itertools::process_results;
|
||||||
use slog::{info, warn, Logger};
|
use slog::{info, warn, Logger};
|
||||||
use state_processing::state_advance::complete_state_advance;
|
use state_processing::state_advance::complete_state_advance;
|
||||||
@ -100,7 +100,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
|
|||||||
store: Arc<HotColdDB<E, Hot, Cold>>,
|
store: Arc<HotColdDB<E, Hot, Cold>>,
|
||||||
current_slot: Option<Slot>,
|
current_slot: Option<Slot>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
count_unrealized_config: CountUnrealized,
|
|
||||||
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
|
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
|
||||||
// Fetch finalized block.
|
// Fetch finalized block.
|
||||||
let finalized_checkpoint = head_state.finalized_checkpoint();
|
let finalized_checkpoint = head_state.finalized_checkpoint();
|
||||||
@ -166,8 +165,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
|
|||||||
.map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
|
.map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
|
||||||
|
|
||||||
let mut state = finalized_snapshot.beacon_state;
|
let mut state = finalized_snapshot.beacon_state;
|
||||||
let blocks_len = blocks.len();
|
for block in blocks {
|
||||||
for (i, block) in blocks.into_iter().enumerate() {
|
|
||||||
complete_state_advance(&mut state, None, block.slot(), spec)
|
complete_state_advance(&mut state, None, block.slot(), spec)
|
||||||
.map_err(|e| format!("State advance failed: {:?}", e))?;
|
.map_err(|e| format!("State advance failed: {:?}", e))?;
|
||||||
|
|
||||||
@ -190,15 +188,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
|
|||||||
// This scenario is so rare that it seems OK to double-verify some blocks.
|
// This scenario is so rare that it seems OK to double-verify some blocks.
|
||||||
let payload_verification_status = PayloadVerificationStatus::Optimistic;
|
let payload_verification_status = PayloadVerificationStatus::Optimistic;
|
||||||
|
|
||||||
// Because we are replaying a single chain of blocks, we only need to calculate unrealized
|
|
||||||
// justification for the last block in the chain.
|
|
||||||
let is_last_block = i + 1 == blocks_len;
|
|
||||||
let count_unrealized = if is_last_block {
|
|
||||||
count_unrealized_config
|
|
||||||
} else {
|
|
||||||
CountUnrealized::False
|
|
||||||
};
|
|
||||||
|
|
||||||
fork_choice
|
fork_choice
|
||||||
.on_block(
|
.on_block(
|
||||||
block.slot(),
|
block.slot(),
|
||||||
@ -209,7 +198,6 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
|
|||||||
&state,
|
&state,
|
||||||
payload_verification_status,
|
payload_verification_status,
|
||||||
spec,
|
spec,
|
||||||
count_unrealized,
|
|
||||||
)
|
)
|
||||||
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
|
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
|
||||||
}
|
}
|
||||||
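With `count_unrealized` gone from `reset_fork_choice_to_finalization`, every replayed block takes the unrealized-justification path, so the `is_last_block` bookkeeping above has nothing left to gate. A before/after sketch of the call shape, abbreviated from the hunks above (middle arguments elided with `..`):

    // Before: the caller decided whether unrealized checkpoints were computed.
    // fork_choice.on_block(block.slot(), .., payload_verification_status, spec, count_unrealized)
    // After: the parameter is gone; unrealized justification is always computed.
    // fork_choice.on_block(block.slot(), .., payload_verification_status, spec)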
@@ -57,7 +57,7 @@ pub mod validator_pubkey_cache;

 pub use self::beacon_chain::{
     AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes,
-    BeaconStore, ChainSegmentResult, CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate,
+    BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate,
     ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
@@ -70,7 +70,7 @@ pub use attestation_verification::Error as AttestationError;
 pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
 pub use block_verification::{
     get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock,
-    ExecutionPayloadError, GossipVerifiedBlock, IntoExecutionPendingBlock,
+    ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock,
     PayloadVerificationOutcome, PayloadVerificationStatus,
 };
 pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
@@ -1016,6 +1016,17 @@ lazy_static! {
         "light_client_optimistic_update_verification_success_total",
         "Number of light client optimistic updates verified for gossip"
     );
+    /*
+     * Aggregate subset metrics
+     */
+    pub static ref SYNC_CONTRIBUTION_SUBSETS: Result<IntCounter> = try_create_int_counter(
+        "beacon_sync_contribution_subsets_total",
+        "Count of new sync contributions that are subsets of already known aggregates"
+    );
+    pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result<IntCounter> = try_create_int_counter(
+        "beacon_aggregated_attestation_subsets_total",
+        "Count of new aggregated attestations that are subsets of already known aggregates"
+    );
 }

 /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
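The two counters registered above are bumped from the verification paths changed later in this diff; the sync-contribution hunks below show the exact call, and the attestation counterpart is assumed to be incremented analogously in attestation verification (not shown in this excerpt):

    metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS);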
@@ -1,7 +1,9 @@
 //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or
 //! sync committee contributions if we've already seen them.

-use std::collections::HashSet;
+use crate::sync_committee_verification::SyncCommitteeData;
+use ssz_types::{BitList, BitVector};
+use std::collections::HashMap;
 use std::marker::PhantomData;
 use tree_hash::TreeHash;
 use types::consts::altair::{
@@ -10,8 +12,16 @@ use types::consts::altair::{
 use types::slot_data::SlotData;
 use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution};

-pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>;
-pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>;
+pub type ObservedSyncContributions<E> = ObservedAggregates<
+    SyncCommitteeContribution<E>,
+    E,
+    BitVector<<E as types::EthSpec>::SyncSubcommitteeSize>,
+>;
+pub type ObservedAggregateAttestations<E> = ObservedAggregates<
+    Attestation<E>,
+    E,
+    BitList<<E as types::EthSpec>::MaxValidatorsPerCommittee>,
+>;

 /// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`.
 pub trait Consts {
@@ -69,10 +79,81 @@ impl<T: EthSpec> Consts for SyncCommitteeContribution<T> {
     }
 }

+/// A trait for types that implement a behaviour where one object of that type
+/// can be a subset/superset of another.
+/// This trait allows us to be generic over the aggregate item that we store in the cache that
+/// we want to prevent duplicates/subsets for.
+pub trait SubsetItem {
+    /// The item that is stored for later comparison with new incoming aggregate items.
+    type Item;
+
+    /// Returns `true` if `self` is a non-strict subset of `other` and `false` otherwise.
+    fn is_subset(&self, other: &Self::Item) -> bool;
+
+    /// Returns `true` if `self` is a non-strict superset of `other` and `false` otherwise.
+    fn is_superset(&self, other: &Self::Item) -> bool;
+
+    /// Returns the item that gets stored in `ObservedAggregates` for later subset
+    /// comparison with incoming aggregates.
+    fn get_item(&self) -> Self::Item;
+
+    /// Returns a unique value that keys the object to the item that is being stored
+    /// in `ObservedAggregates`.
+    fn root(&self) -> Hash256;
+}
+
+impl<T: EthSpec> SubsetItem for Attestation<T> {
+    type Item = BitList<T::MaxValidatorsPerCommittee>;
+    fn is_subset(&self, other: &Self::Item) -> bool {
+        self.aggregation_bits.is_subset(other)
+    }
+
+    fn is_superset(&self, other: &Self::Item) -> bool {
+        other.is_subset(&self.aggregation_bits)
+    }
+
+    /// Returns the attestation aggregation bits.
+    fn get_item(&self) -> Self::Item {
+        self.aggregation_bits.clone()
+    }
+
+    /// Returns the hash tree root of the attestation data.
+    fn root(&self) -> Hash256 {
+        self.data.tree_hash_root()
+    }
+}
+
+impl<T: EthSpec> SubsetItem for SyncCommitteeContribution<T> {
+    type Item = BitVector<T::SyncSubcommitteeSize>;
+    fn is_subset(&self, other: &Self::Item) -> bool {
+        self.aggregation_bits.is_subset(other)
+    }
+
+    fn is_superset(&self, other: &Self::Item) -> bool {
+        other.is_subset(&self.aggregation_bits)
+    }
+
+    /// Returns the sync contribution aggregation bits.
+    fn get_item(&self) -> Self::Item {
+        self.aggregation_bits.clone()
+    }
+
+    /// Returns the hash tree root of the root, slot and subcommittee index
+    /// of the sync contribution.
+    fn root(&self) -> Hash256 {
+        SyncCommitteeData {
+            root: self.beacon_block_root,
+            slot: self.slot,
+            subcommittee_index: self.subcommittee_index,
+        }
+        .tree_hash_root()
+    }
+}
+
 #[derive(Debug, PartialEq)]
 pub enum ObserveOutcome {
-    /// This item was already known.
-    AlreadyKnown,
+    /// This item is a non-strict subset of an already known item.
+    Subset,
     /// This was the first time this item was observed.
     New,
 }
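The subset/superset relation these impls rely on is plain bitfield containment. A minimal self-contained sketch, assuming the `is_subset` helper shipped by the `ssz_types = "0.5.3"` bump later in this diff (capacity and bit positions invented):

    use ssz_types::{typenum::U8, BitList};

    fn main() {
        let mut known = BitList::<U8>::with_capacity(8).unwrap();
        let mut incoming = BitList::<U8>::with_capacity(8).unwrap();
        known.set(1, true).unwrap();
        known.set(3, true).unwrap();
        incoming.set(3, true).unwrap();

        // `incoming` adds no new participants over `known`, so an aggregate
        // carrying these bits is reported as a non-strict subset.
        assert!(incoming.is_subset(&known));
        // The reverse check is what `SubsetItem::is_superset` performs.
        assert!(!known.is_subset(&incoming));
    }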
@@ -94,26 +175,28 @@ pub enum Error {
     },
 }

-/// A `HashSet` that contains entries related to some `Slot`.
-struct SlotHashSet {
-    set: HashSet<Hash256>,
+/// A `HashMap` that contains entries related to some `Slot`.
+struct SlotHashSet<I> {
+    /// Contains a vector of maximally-sized aggregation bitfields/bitvectors
+    /// such that no bitfield/bitvector is a subset of any other in the list.
+    map: HashMap<Hash256, Vec<I>>,
     slot: Slot,
     max_capacity: usize,
 }

-impl SlotHashSet {
+impl<I> SlotHashSet<I> {
     pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self {
         Self {
             slot,
-            set: HashSet::with_capacity(initial_capacity),
+            map: HashMap::with_capacity(initial_capacity),
             max_capacity,
         }
     }

     /// Store the items in self so future observations recognise its existence.
-    pub fn observe_item<T: SlotData>(
+    pub fn observe_item<S: SlotData + SubsetItem<Item = I>>(
         &mut self,
-        item: &T,
+        item: &S,
         root: Hash256,
     ) -> Result<ObserveOutcome, Error> {
         if item.get_slot() != self.slot {
@@ -123,29 +206,45 @@ impl SlotHashSet {
             });
         }

-        if self.set.contains(&root) {
-            Ok(ObserveOutcome::AlreadyKnown)
-        } else {
+        if let Some(aggregates) = self.map.get_mut(&root) {
+            for existing in aggregates {
+                // Check if `item` is a subset of any of the observed aggregates
+                if item.is_subset(existing) {
+                    return Ok(ObserveOutcome::Subset);
+                // Check if `item` is a superset of any of the observed aggregates
+                // If true, we replace the new item with its existing subset. This allows us
+                // to hold fewer items in the list.
+                } else if item.is_superset(existing) {
+                    *existing = item.get_item();
+                    return Ok(ObserveOutcome::New);
+                }
+            }
+        }

         // Here we check to see if this slot has reached the maximum observation count.
         //
         // The resulting behaviour is that we are no longer able to successfully observe new
-        // items, however we will continue to return `is_known` values. We could also
-        // disable `is_known`, however then we would stop forwarding items across the
+        // items, however we will continue to return `is_known_subset` values. We could also
+        // disable `is_known_subset`, however then we would stop forwarding items across the
         // gossip network and I think that this is a worse case than sending some invalid ones.
         // The underlying libp2p network is responsible for removing duplicate messages, so
        // this doesn't risk a broadcast loop.
-        if self.set.len() >= self.max_capacity {
+        if self.map.len() >= self.max_capacity {
            return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity));
        }

-        self.set.insert(root);
+        let item = item.get_item();
+        self.map.entry(root).or_default().push(item);
         Ok(ObserveOutcome::New)
     }
-    }

-    /// Indicates if `item` has been observed before.
-    pub fn is_known<T: SlotData>(&self, item: &T, root: Hash256) -> Result<bool, Error> {
+    /// Check if `item` is a non-strict subset of any of the already observed aggregates for
+    /// the given root and slot.
+    pub fn is_known_subset<S: SlotData + SubsetItem<Item = I>>(
+        &self,
+        item: &S,
+        root: Hash256,
+    ) -> Result<bool, Error> {
         if item.get_slot() != self.slot {
             return Err(Error::IncorrectSlot {
                 expected: self.slot,
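To make the branching in `observe_item` concrete, a hypothetical observation sequence for a single root (bit patterns invented):

    // observe_item(0b0101) -> Ok(New)     // nothing stored for this root yet
    // observe_item(0b0001) -> Ok(Subset)  // adds no bits over the stored 0b0101
    // observe_item(0b1101) -> Ok(New)     // superset: overwrites 0b0101 in place
    // observe_item(0b0101) -> Ok(Subset)  // now covered by the stored 0b1101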
@ -153,25 +252,28 @@ impl SlotHashSet {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(self.set.contains(&root))
|
Ok(self
|
||||||
|
.map
|
||||||
|
.get(&root)
|
||||||
|
.map_or(false, |agg| agg.iter().any(|val| item.is_subset(val))))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The number of observed items in `self`.
|
/// The number of observed items in `self`.
|
||||||
pub fn len(&self) -> usize {
|
pub fn len(&self) -> usize {
|
||||||
self.set.len()
|
self.map.len()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Stores the roots of objects for some number of `Slots`, so we can determine if
|
/// Stores the roots of objects for some number of `Slots`, so we can determine if
|
||||||
/// these have previously been seen on the network.
|
/// these have previously been seen on the network.
|
||||||
pub struct ObservedAggregates<T: TreeHash + SlotData + Consts, E: EthSpec> {
|
pub struct ObservedAggregates<T: SlotData + Consts, E: EthSpec, I> {
|
||||||
lowest_permissible_slot: Slot,
|
lowest_permissible_slot: Slot,
|
||||||
sets: Vec<SlotHashSet>,
|
sets: Vec<SlotHashSet<I>>,
|
||||||
_phantom_spec: PhantomData<E>,
|
_phantom_spec: PhantomData<E>,
|
||||||
_phantom_tree_hash: PhantomData<T>,
|
_phantom_tree_hash: PhantomData<T>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates<T, E> {
|
impl<T: SlotData + Consts, E: EthSpec, I> Default for ObservedAggregates<T, E, I> {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
lowest_permissible_slot: Slot::new(0),
|
lowest_permissible_slot: Slot::new(0),
|
||||||
@ -182,17 +284,17 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> {
|
impl<T: SlotData + Consts + SubsetItem<Item = I>, E: EthSpec, I> ObservedAggregates<T, E, I> {
|
||||||
/// Store the root of `item` in `self`.
|
/// Store `item` in `self` keyed at `root`.
|
||||||
///
|
///
|
||||||
/// `root` must equal `item.tree_hash_root()`.
|
/// `root` must equal `item.root::<SubsetItem>()`.
|
||||||
pub fn observe_item(
|
pub fn observe_item(
|
||||||
&mut self,
|
&mut self,
|
||||||
item: &T,
|
item: &T,
|
||||||
root_opt: Option<Hash256>,
|
root_opt: Option<Hash256>,
|
||||||
) -> Result<ObserveOutcome, Error> {
|
) -> Result<ObserveOutcome, Error> {
|
||||||
let index = self.get_set_index(item.get_slot())?;
|
let index = self.get_set_index(item.get_slot())?;
|
||||||
let root = root_opt.unwrap_or_else(|| item.tree_hash_root());
|
let root = root_opt.unwrap_or_else(|| item.root());
|
||||||
|
|
||||||
self.sets
|
self.sets
|
||||||
.get_mut(index)
|
.get_mut(index)
|
||||||
@ -200,17 +302,18 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> {
|
|||||||
.and_then(|set| set.observe_item(item, root))
|
.and_then(|set| set.observe_item(item, root))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check to see if the `root` of `item` is in self.
|
/// Check if `item` is a non-strict subset of any of the already observed aggregates for
|
||||||
|
/// the given root and slot.
|
||||||
///
|
///
|
||||||
/// `root` must equal `a.tree_hash_root()`.
|
/// `root` must equal `item.root::<SubsetItem>()`.
|
||||||
#[allow(clippy::wrong_self_convention)]
|
#[allow(clippy::wrong_self_convention)]
|
||||||
pub fn is_known(&mut self, item: &T, root: Hash256) -> Result<bool, Error> {
|
pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result<bool, Error> {
|
||||||
let index = self.get_set_index(item.get_slot())?;
|
let index = self.get_set_index(item.get_slot())?;
|
||||||
|
|
||||||
self.sets
|
self.sets
|
||||||
.get(index)
|
.get(index)
|
||||||
.ok_or(Error::InvalidSetIndex(index))
|
.ok_or(Error::InvalidSetIndex(index))
|
||||||
.and_then(|set| set.is_known(item, root))
|
.and_then(|set| set.is_known_subset(item, root))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The maximum number of slots that items are stored for.
|
/// The maximum number of slots that items are stored for.
|
||||||
@ -296,7 +399,6 @@ impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> {
|
|||||||
#[cfg(not(debug_assertions))]
|
#[cfg(not(debug_assertions))]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use tree_hash::TreeHash;
|
|
||||||
use types::{test_utils::test_random_instance, Hash256};
|
use types::{test_utils::test_random_instance, Hash256};
|
||||||
|
|
||||||
type E = types::MainnetEthSpec;
|
type E = types::MainnetEthSpec;
|
||||||
@ -330,7 +432,7 @@ mod tests {
|
|||||||
|
|
||||||
for a in &items {
|
for a in &items {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
store.is_known(a, a.tree_hash_root()),
|
store.is_known_subset(a, a.root()),
|
||||||
Ok(false),
|
Ok(false),
|
||||||
"should indicate an unknown attestation is unknown"
|
"should indicate an unknown attestation is unknown"
|
||||||
);
|
);
|
||||||
@ -343,13 +445,13 @@ mod tests {
|
|||||||
|
|
||||||
for a in &items {
|
for a in &items {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
store.is_known(a, a.tree_hash_root()),
|
store.is_known_subset(a, a.root()),
|
||||||
Ok(true),
|
Ok(true),
|
||||||
"should indicate a known attestation is known"
|
"should indicate a known attestation is known"
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
store.observe_item(a, Some(a.tree_hash_root())),
|
store.observe_item(a, Some(a.root())),
|
||||||
Ok(ObserveOutcome::AlreadyKnown),
|
Ok(ObserveOutcome::Subset),
|
||||||
"should acknowledge an existing attestation"
|
"should acknowledge an existing attestation"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -37,6 +37,7 @@ use bls::{verify_signature_sets, PublicKeyBytes};
 use derivative::Derivative;
 use safe_arith::ArithError;
 use slot_clock::SlotClock;
+use ssz_derive::{Decode, Encode};
 use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError;
 use state_processing::signature_sets::{
     signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set,
@@ -47,6 +48,7 @@ use std::borrow::Cow;
 use std::collections::HashMap;
 use strum::AsRefStr;
 use tree_hash::TreeHash;
+use tree_hash_derive::TreeHash;
 use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
 use types::slot_data::SlotData;
 use types::sync_committee::Error as SyncCommitteeError;
@@ -110,14 +112,14 @@ pub enum Error {
     ///
     /// The peer has sent an invalid message.
     AggregatorPubkeyUnknown(u64),
-    /// The sync contribution has been seen before; either in a block, on the gossip network or from a
-    /// local validator.
+    /// The sync contribution or a superset of this sync contribution's aggregation bits for the same data
+    /// has been seen before; either in a block on the gossip network or from a local validator.
     ///
     /// ## Peer scoring
     ///
     /// It's unclear if this sync contribution is valid, however we have already observed it and do not
     /// need to observe it again.
-    SyncContributionAlreadyKnown(Hash256),
+    SyncContributionSupersetKnown(Hash256),
     /// There has already been an aggregation observed for this validator, we refuse to process a
     /// second.
     ///
@@ -268,6 +270,14 @@ pub struct VerifiedSyncContribution<T: BeaconChainTypes> {
     participant_pubkeys: Vec<PublicKeyBytes>,
 }

+/// The sync contribution data.
+#[derive(Encode, Decode, TreeHash)]
+pub struct SyncCommitteeData {
+    pub slot: Slot,
+    pub root: Hash256,
+    pub subcommittee_index: u64,
+}
+
 /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network.
 #[derive(Clone)]
 pub struct VerifiedSyncCommitteeMessage {
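`SyncCommitteeData` changes what the observation cache is keyed on. Previously the key hashed the whole contribution, so two contributions differing only in their aggregation bits could never collide; now the key covers only the data being signed over, which is what makes the subset/superset comparison possible. A sketch mirroring the verification hunk below:

    // Before: aggregation bits feed into the key.
    // let key = contribution.tree_hash_root();
    // After: contributions for the same (slot, root, subcommittee) share a key.
    let key = SyncCommitteeData {
        slot: contribution.slot,
        root: contribution.beacon_block_root,
        subcommittee_index: contribution.subcommittee_index,
    }
    .tree_hash_root();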
@ -314,15 +324,22 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> {
|
|||||||
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
return Err(Error::AggregatorNotInCommittee { aggregator_index });
|
||||||
};
|
};
|
||||||
|
|
||||||
// Ensure the valid sync contribution has not already been seen locally.
|
// Ensure the valid sync contribution or its superset has not already been seen locally.
|
||||||
let contribution_root = contribution.tree_hash_root();
|
let contribution_data_root = SyncCommitteeData {
|
||||||
|
slot: contribution.slot,
|
||||||
|
root: contribution.beacon_block_root,
|
||||||
|
subcommittee_index: contribution.subcommittee_index,
|
||||||
|
}
|
||||||
|
.tree_hash_root();
|
||||||
|
|
||||||
if chain
|
if chain
|
||||||
.observed_sync_contributions
|
.observed_sync_contributions
|
||||||
.write()
|
.write()
|
||||||
.is_known(contribution, contribution_root)
|
.is_known_subset(contribution, contribution_data_root)
|
||||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||||
{
|
{
|
||||||
return Err(Error::SyncContributionAlreadyKnown(contribution_root));
|
metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS);
|
||||||
|
return Err(Error::SyncContributionSupersetKnown(contribution_data_root));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure there has been no other observed aggregate for the given `aggregator_index`.
|
// Ensure there has been no other observed aggregate for the given `aggregator_index`.
|
||||||
@ -376,13 +393,14 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> {
|
|||||||
//
|
//
|
||||||
// It's important to double check that the contribution is not already known, otherwise two
|
// It's important to double check that the contribution is not already known, otherwise two
|
||||||
// contribution processed at the same time could be published.
|
// contribution processed at the same time could be published.
|
||||||
if let ObserveOutcome::AlreadyKnown = chain
|
if let ObserveOutcome::Subset = chain
|
||||||
.observed_sync_contributions
|
.observed_sync_contributions
|
||||||
.write()
|
.write()
|
||||||
.observe_item(contribution, Some(contribution_root))
|
.observe_item(contribution, Some(contribution_data_root))
|
||||||
.map_err(|e| Error::BeaconChainError(e.into()))?
|
.map_err(|e| Error::BeaconChainError(e.into()))?
|
||||||
{
|
{
|
||||||
return Err(Error::SyncContributionAlreadyKnown(contribution_root));
|
metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS);
|
||||||
|
return Err(Error::SyncContributionSupersetKnown(contribution_data_root));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Observe the aggregator so we don't process another aggregate from them.
|
// Observe the aggregator so we don't process another aggregate from them.
|
||||||
@@ -24,7 +24,6 @@ use execution_layer::{
     },
     ExecutionLayer,
 };
-use fork_choice::CountUnrealized;
 use futures::channel::mpsc::Receiver;
 pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
@@ -1758,12 +1757,7 @@
         self.set_current_slot(slot);
         let block_hash: SignedBeaconBlockHash = self
             .chain
-            .process_block(
-                block_root,
-                block.into(),
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, block.into(), NotifyExecutionLayer::Yes)
             .await?
             .try_into()
             .unwrap();
@@ -1781,7 +1775,6 @@
             .process_block(
                 wrapped_block.canonical_root(),
                 wrapped_block,
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await?
@@ -699,8 +699,8 @@ async fn aggregated_gossip_verification() {
         |tester, err| {
             assert!(matches!(
                 err,
-                AttnError::AttestationAlreadyKnown(hash)
-                if hash == tester.valid_aggregate.message.aggregate.tree_hash_root()
+                AttnError::AttestationSupersetKnown(hash)
+                if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root()
             ))
         },
     )
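The updated matcher reflects the same keying change on the attestation side: aggregates are now deduplicated by the `AttestationData` root rather than the root of the whole attestation, whose hash changes whenever the aggregation bits change (see the `SubsetItem::root` impl earlier in this diff):

    // Old key: varies with the aggregation bits.
    // attestation.tree_hash_root()
    // New key: stable across all aggregates of the same data.
    // attestation.data.tree_hash_root()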
@@ -4,9 +4,12 @@ use beacon_chain::blob_verification::BlockWrapper;
 use beacon_chain::{
     blob_verification::AsBlock,
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
+    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutedBlock,
+    ExecutionPendingBlock,
+};
+use beacon_chain::{
+    BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer,
 };
-use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
-use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use logging::test_logger;
 use slasher::{Config as SlasherConfig, Slasher};
@@ -153,18 +156,14 @@ async fn chain_segment_full_segment() {
     // Sneak in a little check to ensure we can process empty chain segments.
     harness
         .chain
-        .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes)
+        .process_chain_segment(vec![], NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import empty chain segment");

     harness
         .chain
-        .process_chain_segment(
-            blocks.clone(),
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(blocks.clone(), NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import chain segment");
@@ -196,11 +195,7 @@ async fn chain_segment_varying_chunk_size() {
         for chunk in blocks.chunks(*chunk_size) {
             harness
                 .chain
-                .process_chain_segment(
-                    chunk.to_vec(),
-                    CountUnrealized::True,
-                    NotifyExecutionLayer::Yes,
-                )
+                .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error()
                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@@ -239,7 +234,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -263,7 +258,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -297,7 +292,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -321,7 +316,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -347,7 +342,7 @@ async fn assert_invalid_signature(
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -369,11 +364,7 @@
     // imported prior to this test.
     let _ = harness
         .chain
-        .process_chain_segment(
-            ancestor_blocks,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
         .await;
     harness.chain.recompute_head_at_current_slot().await;

@@ -382,7 +373,6 @@
         .process_block(
             snapshots[block_index].beacon_block.canonical_root(),
             snapshots[block_index].beacon_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await;
@@ -435,11 +425,7 @@ async fn invalid_signature_gossip_block() {
         .collect();
     harness
         .chain
-        .process_chain_segment(
-            ancestor_blocks,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
         .await
        .into_block_error()
        .expect("should import all blocks prior to the one being tested");
@@ -451,7 +437,6 @@ async fn invalid_signature_gossip_block() {
             .process_block(
                 signed_block.canonical_root(),
                 Arc::new(signed_block),
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes,
             )
             .await,
@@ -486,7 +471,7 @@ async fn invalid_signature_block_proposal() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -684,7 +669,7 @@ async fn invalid_signature_deposit() {
         !matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
+                .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -764,7 +749,6 @@ async fn block_gossip_verification() {
         .process_block(
             gossip_verified.block_root,
             gossip_verified,
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1039,7 +1023,6 @@ async fn verify_block_for_gossip_slashing_detection() {
         .process_block(
             verified_block.block_root,
             verified_block,
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1079,7 +1062,6 @@ async fn verify_block_for_gossip_doppelganger_detection() {
         .process_block(
             verified_block.block_root,
             verified_block,
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1227,7 +1209,6 @@ async fn add_base_block_to_altair_chain() {
         .process_block(
             base_block.canonical_root(),
             Arc::new(base_block.clone()),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1243,11 +1224,7 @@
     assert!(matches!(
         harness
             .chain
-            .process_chain_segment(
-                vec![Arc::new(base_block).into()],
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_chain_segment(vec![Arc::new(base_block).into()], NotifyExecutionLayer::Yes,)
            .await,
        ChainSegmentResult::Failed {
            imported_blocks: 0,
@@ -1366,7 +1343,6 @@ async fn add_altair_block_to_base_chain() {
         .process_block(
             altair_block.canonical_root(),
             Arc::new(altair_block.clone()),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1384,7 +1360,6 @@
             .chain
             .process_chain_segment(
                 vec![Arc::new(altair_block).into()],
-                CountUnrealized::True,
                 NotifyExecutionLayer::Yes
             )
             .await,
@@ -1397,3 +1372,117 @@
         }
     ));
 }
+
+#[tokio::test]
+async fn import_duplicate_block_unrealized_justification() {
+    let spec = MainnetEthSpec::default_spec();
+
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .spec(spec)
+        .keypairs(KEYPAIRS[..].to_vec())
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .build();
+    let chain = &harness.chain;
+
+    // Move out of the genesis slot.
+    harness.advance_slot();
+
+    // Build the chain out to the first justification opportunity 2/3rds of the way through epoch 2.
+    let num_slots = E::slots_per_epoch() as usize * 8 / 3;
+    harness
+        .extend_chain(
+            num_slots,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    // Move into the next empty slot.
+    harness.advance_slot();
+
+    // The store's justified checkpoint must still be at epoch 0, while unrealized justification
+    // must be at epoch 1.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
+    drop(fc);
+
+    // Produce a block to justify epoch 2.
+    let state = harness.get_current_state();
+    let slot = harness.get_current_slot();
+    let (block_contents, _) = harness.make_block(state.clone(), slot).await;
+    let (block, _) = block_contents;
+    let block = Arc::new(block);
+    let block_root = block.canonical_root();
+
+    // Create two verified variants of the block, representing the same block being processed in
+    // parallel.
+    let notify_execution_layer = NotifyExecutionLayer::Yes;
+    let verified_block1 = block
+        .clone()
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+    let verified_block2 = block
+        .into_execution_pending_block(block_root, &chain, notify_execution_layer)
+        .unwrap();
+
+    // Import the first block, simulating a block processed via a finalized chain segment.
+    import_execution_pending_block(chain.clone(), verified_block1)
+        .await
+        .unwrap();
+
+    // Unrealized justification should NOT have updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    let unrealized_justification = fc.unrealized_justified_checkpoint();
+    assert_eq!(unrealized_justification.epoch, 2);
+
+    // The fork choice node for the block should have unrealized justification.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+    drop(fc);
+
+    // Import the second verified block, simulating a block processed via RPC.
+    import_execution_pending_block(chain.clone(), verified_block2)
+        .await
+        .unwrap();
+
+    // Unrealized justification should still be updated.
+    let fc = chain.canonical_head.fork_choice_read_lock();
+    assert_eq!(fc.justified_checkpoint().epoch, 0);
+    assert_eq!(
+        fc.unrealized_justified_checkpoint(),
+        unrealized_justification
+    );
+
+    // The fork choice node for the block should still have the unrealized justified checkpoint.
+    let fc_block = fc.get_block(&block_root).unwrap();
+    assert_eq!(
+        fc_block.unrealized_justified_checkpoint,
+        Some(unrealized_justification)
+    );
+}
+
+async fn import_execution_pending_block<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    execution_pending_block: ExecutionPendingBlock<T>,
+) -> Result<AvailabilityProcessingStatus, String> {
+    match chain
+        .clone()
+        .into_executed_block(execution_pending_block)
+        .await
+        .unwrap()
+    {
+        ExecutedBlock::Available(block) => chain
+            .import_available_block(Box::from(block))
+            .await
+            .map_err(|e| format!("{e:?}")),
+        ExecutedBlock::AvailabilityPending(_) => {
+            Err("AvailabilityPending not expected in this test. Block not imported.".to_string())
+        }
+    }
+}
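A quick check of the `num_slots` arithmetic in the new test, assuming the mainnet preset of 32 slots per epoch:

    // 32 * 8 / 3 = 85 with integer division: two full epochs (64 slots)
    // plus 21 slots, i.e. roughly 2/3 of the way through epoch 2.
    assert_eq!(32 * 8 / 3, 85);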
@@ -17,9 +17,7 @@ use execution_layer::{
     test_utils::ExecutionBlockGenerator,
     ExecutionLayer, ForkchoiceState, PayloadAttributes,
 };
-use fork_choice::{
-    CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
-};
+use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus};
 use logging::test_logger;
 use proto_array::{Error as ProtoArrayError, ExecutionStatus};
 use slot_clock::SlotClock;
@@ -699,7 +697,6 @@ async fn invalidates_all_descendants() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -799,7 +796,6 @@ async fn switches_heads() {
         .process_block(
             fork_block.canonical_root(),
             Arc::new(fork_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1056,7 +1052,7 @@ async fn invalid_parent() {

     // Ensure the block built atop an invalid payload is invalid for import.
     assert!(matches!(
-        rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await,
+        rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));
@@ -1071,7 +1067,7 @@ async fn invalid_parent() {
             &state,
             PayloadVerificationStatus::Optimistic,
             &rig.harness.chain.spec,
-            CountUnrealized::True,
         ),
         Err(ForkChoiceError::ProtoArrayStringError(message))
         if message.contains(&format!(
@@ -1342,12 +1338,7 @@ async fn build_optimistic_chain(
     for block in blocks {
         rig.harness
             .chain
-            .process_block(
-                block.canonical_root(),
-                block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes)
             .await
             .unwrap();
     }
@@ -1906,7 +1897,6 @@ async fn recover_from_invalid_head_by_importing_blocks() {
         .process_block(
             fork_block.canonical_root(),
             fork_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -13,7 +13,6 @@ use beacon_chain::{
     ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
 };
 use eth2_network_config::TRUSTED_SETUP;
-use fork_choice::CountUnrealized;
 use kzg::TrustedSetup;
 use lazy_static::lazy_static;
 use logging::test_logger;
@@ -2165,7 +2164,6 @@ async fn weak_subjectivity_sync() {
         .process_block(
             full_block.canonical_root(),
             Arc::new(full_block),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -1,6 +1,6 @@
 #![cfg(not(debug_assertions))]

-use beacon_chain::sync_committee_verification::Error as SyncCommitteeError;
+use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData};
 use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee};
 use int_to_bytes::int_to_bytes32;
 use lazy_static::lazy_static;
@@ -444,11 +444,17 @@ async fn aggregated_gossip_verification() {
      * subcommittee index contribution.subcommittee_index.
      */

+    let contribution = &valid_aggregate.message.contribution;
+    let sync_committee_data = SyncCommitteeData {
+        slot: contribution.slot,
+        root: contribution.beacon_block_root,
+        subcommittee_index: contribution.subcommittee_index,
+    };
     assert_invalid!(
         "aggregate that has already been seen",
         valid_aggregate.clone(),
-        SyncCommitteeError::SyncContributionAlreadyKnown(hash)
-        if hash == valid_aggregate.message.contribution.tree_hash_root()
+        SyncCommitteeError::SyncContributionSupersetKnown(hash)
+        if hash == sync_committee_data.tree_hash_root()
     );

     /*
@@ -8,7 +8,6 @@ use beacon_chain::{
     },
     BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
 };
-use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use operation_pool::PersistedOperationPool;
 use state_processing::{
@@ -686,7 +685,6 @@ async fn run_skip_slot_test(skip_slots: u64) {
         .process_block(
             harness_a.chain.head_snapshot().beacon_block_root,
             harness_a.chain.head_snapshot().beacon_block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
         .await
@@ -72,7 +72,7 @@ impl BuilderHttpClient {
             .await?
             .json()
             .await
-            .map_err(Error::Reqwest)
+            .map_err(Into::into)
     }

     /// Perform a HTTP GET request, returning the `Response` for further processing.
@@ -85,7 +85,7 @@ impl BuilderHttpClient {
         if let Some(timeout) = timeout {
             builder = builder.timeout(timeout);
         }
-        let response = builder.send().await.map_err(Error::Reqwest)?;
+        let response = builder.send().await.map_err(Error::from)?;
         ok_or_error(response).await
     }

@@ -114,7 +114,7 @@ impl BuilderHttpClient {
         if let Some(timeout) = timeout {
             builder = builder.timeout(timeout);
         }
-        let response = builder.json(body).send().await.map_err(Error::Reqwest)?;
+        let response = builder.json(body).send().await.map_err(Error::from)?;
         ok_or_error(response).await
     }

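Both call sites now route through the `From<reqwest::Error>` conversion whose new body appears in the execution-layer hunk further below, so errors surface as `Error::HttpClient(PrettyReqwestError)` rather than the removed `Error::Reqwest` variant:

    // builder.send().await.map_err(Error::from)?   // -> Error::HttpClient(..)
    // resp.json().await.map_err(Into::into)        // same conversion, via Into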
@@ -23,7 +23,7 @@ bytes = "1.1.0"
task_executor = { path = "../../common/task_executor" }
hex = "0.4.2"
ethereum_ssz = "0.5.0"
-ssz_types = "0.5.0"
+ssz_types = "0.5.3"
eth2 = { path = "../../common/eth2" }
kzg = { path = "../../crypto/kzg" }
state_processing = { path = "../../consensus/state_processing" }
@@ -51,3 +51,4 @@ keccak-hash = "0.10.0"
hash256-std-hasher = "0.15.2"
triehash = "0.8.4"
hash-db = "0.15.2"
+pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" }
@@ -11,6 +11,7 @@ pub use ethers_core::types::Transaction;
 use ethers_core::utils::rlp::{self, Decodable, Rlp};
 use http::deposit_methods::RpcError;
 pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
+use pretty_reqwest_error::PrettyReqwestError;
 use reqwest::StatusCode;
 use serde::{Deserialize, Serialize};
 use std::convert::TryFrom;
@@ -35,7 +36,7 @@ pub type PayloadId = [u8; 8];

 #[derive(Debug)]
 pub enum Error {
-    Reqwest(reqwest::Error),
+    HttpClient(PrettyReqwestError),
     Auth(auth::Error),
     BadResponse(String),
     RequestFailed(String),
@@ -70,7 +71,7 @@ impl From<reqwest::Error> for Error {
         ) {
             Error::Auth(auth::Error::InvalidToken)
         } else {
-            Error::Reqwest(e)
+            Error::HttpClient(e.into())
         }
     }
 }
@@ -2882,7 +2882,7 @@ pub fn serve<T: BeaconChainTypes>(
                     // It's reasonably likely that two different validators produce
                     // identical aggregates, especially if they're using the same beacon
                     // node.
-                    Err(AttnError::AttestationAlreadyKnown(_)) => continue,
+                    Err(AttnError::AttestationSupersetKnown(_)) => continue,
                     // If we've already seen this aggregator produce an aggregate, just
                     // skip this one.
                     //
@@ -3,7 +3,7 @@ use crate::metrics;
 use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
 use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer};
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
+use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
 use eth2::types::SignedBlockContents;
 use execution_layer::ProvenancedPayload;
 use lighthouse_network::PubsubMessage;
@@ -94,12 +94,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
     let slot = block_clone.message().slot();
     let proposer_index = block_clone.message().proposer_index();
     match chain
-        .process_block(
-            block_root,
-            wrapped_block,
-            CountUnrealized::True,
-            NotifyExecutionLayer::Yes,
-        )
+        .process_block(block_root, wrapped_block, NotifyExecutionLayer::Yes)
         .await
     {
         Ok(AvailabilityProcessingStatus::Imported(root)) => {
@@ -304,7 +304,7 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>(
         }
         // If we already know the contribution, don't broadcast it or attempt to
        // further verify it. Return success.
-        Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue,
+        Err(SyncVerificationError::SyncContributionSupersetKnown(_)) => continue,
         // If we've already seen this aggregator produce an aggregate, just
         // skip this one.
         //
|
@ -5,10 +5,10 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"

 [dependencies]
-discv5 = { version = "0.2.2", features = ["libp2p"] }
+discv5 = { version = "0.3.0", features = ["libp2p"] }
 unsigned-varint = { version = "0.6.0", features = ["codec"] }
 types = { path = "../../consensus/types" }
-ssz_types = "0.5.0"
+ssz_types = "0.5.3"
 serde = { version = "1.0.116", features = ["derive"] }
 serde_derive = "1.0.116"
 ethereum_ssz = "0.5.0"
@ -163,7 +163,7 @@ impl Config {
             udp_port,
             tcp_port,
         });
-        self.discv5_config.ip_mode = discv5::IpMode::Ip4;
+        self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port);
        self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4)
     }

@ -176,9 +176,8 @@ impl Config {
             udp_port,
             tcp_port,
         });
-        self.discv5_config.ip_mode = discv5::IpMode::Ip6 {
-            enable_mapped_addresses: false,
-        };
+        self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), udp_port);
        self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6)
     }

@ -206,10 +205,10 @@ impl Config {
                 tcp_port: tcp6_port,
             },
         );
-        self.discv5_config.ip_mode = discv5::IpMode::Ip6 {
-            enable_mapped_addresses: true,
-        };
+        self.discv5_config.listen_config = discv5::ListenConfig::default()
+            .with_ipv4(v4_addr, udp4_port)
+            .with_ipv6(v6_addr, udp6_port);
+
         self.discv5_config.table_filter = |enr| match (&enr.ip4(), &enr.ip6()) {
             (None, None) => false,
             (None, Some(ip6)) => is_global_ipv6(ip6),
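The three config hunks above replace discv5 0.2's `ip_mode` flags with an explicit `ListenConfig`. Below is a runnable sketch of that builder shape, using a simplified stand-in for discv5 0.3's `ListenConfig` (field names here are illustrative, only the call shape is the point):

use std::net::{Ipv4Addr, Ipv6Addr};

// Simplified stand-in for discv5 0.3's ListenConfig.
#[derive(Default, Debug)]
struct ListenConfig {
    ipv4: Option<(Ipv4Addr, u16)>,
    ipv6: Option<(Ipv6Addr, u16)>,
}

impl ListenConfig {
    // Chainable setters mirroring with_ipv4 / with_ipv6 in the diff.
    fn with_ipv4(mut self, addr: Ipv4Addr, port: u16) -> Self {
        self.ipv4 = Some((addr, port));
        self
    }
    fn with_ipv6(mut self, addr: Ipv6Addr, port: u16) -> Self {
        self.ipv6 = Some((addr, port));
        self
    }
}

fn main() {
    // Dual-stack listen configuration, as in the dual-stack hunk above.
    let cfg = ListenConfig::default()
        .with_ipv4(Ipv4Addr::UNSPECIFIED, 9000)
        .with_ipv6(Ipv6Addr::UNSPECIFIED, 9000);
    println!("{cfg:?}");
}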
@ -279,9 +278,17 @@ impl Default for Config {
                 .build()
                 .expect("The total rate limit has been specified"),
         );
+        let listen_addresses = ListenAddress::V4(ListenAddr {
+            addr: Ipv4Addr::UNSPECIFIED,
+            udp_port: 9000,
+            tcp_port: 9000,
+        });
+
+        let discv5_listen_config =
+            discv5::ListenConfig::from_ip(Ipv4Addr::UNSPECIFIED.into(), 9000);
+
         // discv5 configuration
-        let discv5_config = Discv5ConfigBuilder::new()
+        let discv5_config = Discv5ConfigBuilder::new(discv5_listen_config)
             .enable_packet_filter()
             .session_cache_capacity(5000)
             .request_timeout(Duration::from_secs(1))
@ -304,12 +311,9 @@ impl Default for Config {
        // NOTE: Some of these get overridden by the corresponding CLI default values.
        Config {
            network_dir,
-           listen_addresses: ListenAddress::V4(ListenAddr {
-               addr: Ipv4Addr::UNSPECIFIED,
-               udp_port: 9000,
-               tcp_port: 9000,
-           }),
+           listen_addresses,
            enr_address: (None, None),
            enr_udp4_port: None,
            enr_tcp4_port: None,
            enr_udp6_port: None,
@ -213,12 +213,16 @@ pub fn build_enr<T: EthSpec>(
 fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
     // take preference over disk_enr address if one is not specified
     (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4())
+        &&
+        (local_enr.ip6().is_none() || local_enr.ip6() == disk_enr.ip6())
         // tcp ports must match
         && local_enr.tcp4() == disk_enr.tcp4()
+        && local_enr.tcp6() == disk_enr.tcp6()
         // must match on the same fork
         && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY)
         // take preference over disk udp port if one is not specified
         && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4())
+        && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6())
         // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match,
         // otherwise we use a new ENR. This will likely only be true for non-validating nodes
         && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY)
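The extended `compare_enr` chains the IPv6 fields into the same predicate already used for IPv4: accept when the local value is unset, otherwise require equality. A tiny runnable illustration of that predicate, with `Option`s standing in for ENR fields:

// `local` is the freshly built ENR field, `disk` the persisted one; the
// on-disk ENR is reused only when every such predicate holds.
fn matches_or_unspecified<T: PartialEq>(local: Option<T>, disk: Option<T>) -> bool {
    local.is_none() || local == disk
}

fn main() {
    assert!(matches_or_unspecified::<u16>(None, Some(9000))); // local unset: keep disk ENR
    assert!(matches_or_unspecified(Some(9000), Some(9000))); // both agree
    assert!(!matches_or_unspecified(Some(9001), Some(9000))); // mismatch: rebuild ENR
}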
@ -198,7 +198,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
     fn as_peer_id(&self) -> PeerId {
         match self {
             Self::Secp256k1(pk) => {
-                let pk_bytes = pk.to_bytes();
+                let pk_bytes = pk.to_sec1_bytes();
                 let libp2p_pk = libp2p::core::PublicKey::Secp256k1(
                     libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes)
                         .expect("valid public key"),
@ -222,14 +222,16 @@ impl CombinedKeyExt for CombinedKey {
         match key {
             Keypair::Secp256k1(key) => {
                 let secret =
-                    discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
+                    discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
                         .expect("libp2p key must be valid");
                 Ok(CombinedKey::Secp256k1(secret))
             }
             Keypair::Ed25519(key) => {
-                let ed_keypair =
-                    discv5::enr::ed25519_dalek::SecretKey::from_bytes(&key.encode()[..32])
-                        .expect("libp2p key must be valid");
+                let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
+                    &(key.encode()[..32])
+                        .try_into()
+                        .expect("libp2p key must be valid"),
+                );
                 Ok(CombinedKey::from(ed_keypair))
             }
             Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
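ed25519-dalek 2.x, which discv5 0.3 re-exports, takes a fixed-size `&[u8; 32]` seed instead of a byte slice, hence the inserted `try_into()`. A self-contained sketch of that slice-to-array step using only the standard library:

fn main() {
    let encoded = [7u8; 64]; // stand-in for the 64-byte libp2p ed25519 encoding
    // The 2.x API wants a &[u8; 32]; try_into converts the slice and fails
    // loudly if the length is ever not exactly 32.
    let seed: [u8; 32] = encoded[..32].try_into().expect("slice is 32 bytes");
    assert_eq!(seed, [7u8; 32]);
}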
@ -281,7 +283,7 @@ mod tests {
     fn test_secp256k1_peer_id_conversion() {
         let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
         let sk_bytes = hex::decode(sk_hex).unwrap();
-        let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();
+        let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap();

         let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
         let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();

@ -300,16 +302,18 @@ mod tests {
     fn test_ed25519_peer_conversion() {
         let sk_hex = "4dea8a5072119927e9d243a7d953f2f4bc95b70f110978e2f9bc7a9000e4b261";
         let sk_bytes = hex::decode(sk_hex).unwrap();
-        let secret = discv5::enr::ed25519_dalek::SecretKey::from_bytes(&sk_bytes).unwrap();
-        let public = discv5::enr::ed25519_dalek::PublicKey::from(&secret);
-        let keypair = discv5::enr::ed25519_dalek::Keypair { secret, public };
+        let secret_key = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
+            &sk_bytes.clone().try_into().unwrap(),
+        );

         let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap();
-        let ed25519_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
-        let libp2p_kp = Keypair::Ed25519(ed25519_kp);
+        let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
+        let libp2p_kp = Keypair::Ed25519(secp256k1_kp);
         let peer_id = libp2p_kp.public().to_peer_id();

-        let enr = discv5::enr::EnrBuilder::new("v4").build(&keypair).unwrap();
+        let enr = discv5::enr::EnrBuilder::new("v4")
+            .build(&secret_key)
+            .unwrap();
         let node_id = peer_id_to_node_id(&peer_id).unwrap();

         assert_eq!(enr.node_id(), node_id);
@ -209,13 +209,6 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
         info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(),
             "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()
         );
-        let listen_socket = match config.listen_addrs() {
-            crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(),
-            crate::listen_addr::ListenAddress::V6(v6_addr) => v6_addr.udp_socket_addr(),
-            crate::listen_addr::ListenAddress::DualStack(_v4_addr, v6_addr) => {
-                v6_addr.udp_socket_addr()
-            }
-        };
-
         // convert the keypair into an ENR key
         let enr_key: CombinedKey = CombinedKey::from_libp2p(local_key)?;
@ -251,10 +244,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

         // Start the discv5 service and obtain an event stream
         let event_stream = if !config.disable_discovery {
-            discv5
-                .start(listen_socket)
-                .map_err(|e| e.to_string())
-                .await?;
+            discv5.start().map_err(|e| e.to_string()).await?;
             debug!(log, "Discovery service started");
             EventStream::Awaiting(Box::pin(discv5.event_stream()))
         } else {
|
|||||||
/// If the external address needs to be modified, use `update_enr_udp_socket.
|
/// If the external address needs to be modified, use `update_enr_udp_socket.
|
||||||
pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> {
|
pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> {
|
||||||
self.discv5
|
self.discv5
|
||||||
.enr_insert("tcp", &port.to_be_bytes())
|
.enr_insert("tcp", &port)
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
|
|
||||||
// replace the global version
|
// replace the global version
|
||||||
@ -428,29 +418,12 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
     /// This is with caution. Discovery should automatically maintain this. This should only be
     /// used when automatic discovery is disabled.
     pub fn update_enr_udp_socket(&mut self, socket_addr: SocketAddr) -> Result<(), String> {
-        match socket_addr {
-            SocketAddr::V4(socket) => {
-                self.discv5
-                    .enr_insert("ip", &socket.ip().octets())
-                    .map_err(|e| format!("{:?}", e))?;
-                self.discv5
-                    .enr_insert("udp", &socket.port().to_be_bytes())
-                    .map_err(|e| format!("{:?}", e))?;
-            }
-            SocketAddr::V6(socket) => {
-                self.discv5
-                    .enr_insert("ip6", &socket.ip().octets())
-                    .map_err(|e| format!("{:?}", e))?;
-                self.discv5
-                    .enr_insert("udp6", &socket.port().to_be_bytes())
-                    .map_err(|e| format!("{:?}", e))?;
-            }
-        }
-
-        // replace the global version
-        *self.network_globals.local_enr.write() = self.discv5.local_enr();
+        const IS_TCP: bool = false;
+        if self.discv5.update_local_enr_socket(socket_addr, IS_TCP) {
         // persist modified enr to disk
         enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log);
+        }
+        *self.network_globals.local_enr.write() = self.discv5.local_enr();
         Ok(())
     }

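With discv5 0.3, the per-key `enr_insert` calls collapse into a single `update_local_enr_socket(socket_addr, is_tcp)` call, and discv5 picks the right v4/v6 ENR fields itself. A hedged mock of the resulting control flow; `MockDiscv5` is a stand-in for `discv5::Discv5`, and the return-value semantics shown (true when the ENR actually changed) are an assumption read off the diff:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

struct MockDiscv5;

impl MockDiscv5 {
    // Stand-in for discv5::Discv5::update_local_enr_socket: assumed to
    // return true when the ENR changed and therefore needs persisting.
    fn update_local_enr_socket(&mut self, _socket: SocketAddr, _is_tcp: bool) -> bool {
        true
    }
}

fn main() {
    let mut discv5 = MockDiscv5;
    let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9000);
    const IS_TCP: bool = false;
    if discv5.update_local_enr_socket(socket, IS_TCP) {
        // persist the modified ENR to disk (elided)
    }
}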
@ -217,8 +217,7 @@ mod tests {
         let mut buf = BytesMut::new();
         buf.extend_from_slice(&message);

-        let snappy_protocol_id =
-            ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
+        let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);

         let fork_context = Arc::new(fork_context(ForkName::Base));
         let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
@ -252,8 +251,7 @@ mod tests {
         // Insert length-prefix
         uvi_codec.encode(len, &mut dst).unwrap();

-        let snappy_protocol_id =
-            ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy);
+        let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);

         let fork_context = Arc::new(fork_context(ForkName::Base));
         let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
@ -280,8 +278,7 @@ mod tests {
         dst
     }

-        let protocol_id =
-            ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy);
+        let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy);

         // Response limits
         let fork_context = Arc::new(fork_context(ForkName::Base));

File diff suppressed because it is too large
@ -245,7 +245,7 @@ where
         while let Some((id, req)) = self.dial_queue.pop() {
             self.events_out.push(Err(HandlerErr::Outbound {
                 error: RPCError::Disconnected,
-                proto: req.protocol(),
+                proto: req.versioned_protocol().protocol(),
                 id,
             }));
         }
@ -269,7 +269,7 @@ where
             }
             _ => self.events_out.push(Err(HandlerErr::Outbound {
                 error: RPCError::Disconnected,
-                proto: req.protocol(),
+                proto: req.versioned_protocol().protocol(),
                 id,
             })),
         }
@ -334,7 +334,7 @@ where
     ) {
         self.dial_negotiated -= 1;
         let (id, request) = request_info;
-        let proto = request.protocol();
+        let proto = request.versioned_protocol().protocol();

         // accept outbound connections only if the handler is not deactivated
         if matches!(self.state, HandlerState::Deactivated) {
@ -414,7 +414,7 @@ where
                         128,
                     ) as usize),
                     delay_key: Some(delay_key),
-                    protocol: req.protocol(),
+                    protocol: req.versioned_protocol().protocol(),
                     request_start_time: Instant::now(),
                     remaining_chunks: expected_responses,
                 },
@ -422,7 +422,7 @@ where
         } else {
             self.events_out.push(Err(HandlerErr::Inbound {
                 id: self.current_inbound_substream_id,
-                proto: req.protocol(),
+                proto: req.versioned_protocol().protocol(),
                 error: RPCError::HandlerRejected,
             }));
             return self.shutdown(None);
@ -498,7 +498,7 @@ where
         };
         self.events_out.push(Err(HandlerErr::Outbound {
             error,
-            proto: req.protocol(),
+            proto: req.versioned_protocol().protocol(),
             id,
         }));
     }
@ -895,7 +895,7 @@ where
             // else we return an error, stream should not have closed early.
             let outbound_err = HandlerErr::Outbound {
                 id: request_id,
-                proto: request.protocol(),
+                proto: request.versioned_protocol().protocol(),
                 error: RPCError::IncompleteStream,
             };
             return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err)));
@ -3,11 +3,13 @@
 use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield};
 use regex::bytes::Regex;
 use serde::Serialize;
+use ssz::Encode;
 use ssz_derive::{Decode, Encode};
 use ssz_types::{
     typenum::{U1024, U256, U768},
     VariableList,
 };
+use std::marker::PhantomData;
 use std::ops::Deref;
 use std::sync::Arc;
 use strum::IntoStaticStr;
@ -96,6 +98,30 @@ pub struct Ping {
     pub data: u64,
 }

+/// The METADATA request structure.
+#[superstruct(
+    variants(V1, V2),
+    variant_attributes(derive(Clone, Debug, PartialEq, Serialize),)
+)]
+#[derive(Clone, Debug, PartialEq)]
+pub struct MetadataRequest<T: EthSpec> {
+    _phantom_data: PhantomData<T>,
+}
+
+impl<T: EthSpec> MetadataRequest<T> {
+    pub fn new_v1() -> Self {
+        Self::V1(MetadataRequestV1 {
+            _phantom_data: PhantomData,
+        })
+    }
+
+    pub fn new_v2() -> Self {
+        Self::V2(MetadataRequestV2 {
+            _phantom_data: PhantomData,
+        })
+    }
+}
+
 /// The METADATA response structure.
 #[superstruct(
     variants(V1, V2),
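`superstruct` expands `MetadataRequest` into per-version structs plus one enum, so `new_v1()`/`new_v2()` simply select the wire version. A reduced, runnable model of that expansion without the macro (the real types are generic over `EthSpec`):

#[derive(Clone, Debug, PartialEq)]
struct MetadataRequestV1; // superstruct generates one struct per variant
#[derive(Clone, Debug, PartialEq)]
struct MetadataRequestV2;

#[derive(Clone, Debug, PartialEq)]
enum MetadataRequest {
    V1(MetadataRequestV1),
    V2(MetadataRequestV2),
}

impl MetadataRequest {
    fn new_v1() -> Self {
        Self::V1(MetadataRequestV1)
    }
    fn new_v2() -> Self {
        Self::V2(MetadataRequestV2)
    }
}

fn main() {
    // The constructor picks which wire version the request negotiates.
    assert_ne!(MetadataRequest::new_v1(), MetadataRequest::new_v2());
}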
@ -104,9 +130,8 @@ pub struct Ping {
         serde(bound = "T: EthSpec", deny_unknown_fields),
     )
 )]
-#[derive(Clone, Debug, PartialEq, Serialize, Encode)]
+#[derive(Clone, Debug, PartialEq, Serialize)]
 #[serde(bound = "T: EthSpec")]
-#[ssz(enum_behaviour = "transparent")]
 pub struct MetaData<T: EthSpec> {
     /// A sequential counter indicating when data gets modified.
     pub seq_number: u64,
@ -117,6 +142,38 @@ pub struct MetaData<T: EthSpec> {
     pub syncnets: EnrSyncCommitteeBitfield<T>,
 }

+impl<T: EthSpec> MetaData<T> {
+    /// Returns a V1 MetaData response from self.
+    pub fn metadata_v1(&self) -> Self {
+        match self {
+            md @ MetaData::V1(_) => md.clone(),
+            MetaData::V2(metadata) => MetaData::V1(MetaDataV1 {
+                seq_number: metadata.seq_number,
+                attnets: metadata.attnets.clone(),
+            }),
+        }
+    }
+
+    /// Returns a V2 MetaData response from self by filling unavailable fields with default.
+    pub fn metadata_v2(&self) -> Self {
+        match self {
+            MetaData::V1(metadata) => MetaData::V2(MetaDataV2 {
+                seq_number: metadata.seq_number,
+                attnets: metadata.attnets.clone(),
+                syncnets: Default::default(),
+            }),
+            md @ MetaData::V2(_) => md.clone(),
+        }
+    }
+
+    pub fn as_ssz_bytes(&self) -> Vec<u8> {
+        match self {
+            MetaData::V1(md) => md.as_ssz_bytes(),
+            MetaData::V2(md) => md.as_ssz_bytes(),
+        }
+    }
+}
+
 /// The reason given for a `Goodbye` message.
 ///
 /// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`,
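The `metadata_v1`/`metadata_v2` helpers let a single stored `MetaData` answer either protocol version: downgrading drops `syncnets`, upgrading fills it with the default. A tiny runnable model of that up/down conversion with simplified fields:

#[derive(Clone, Debug, PartialEq)]
enum MetaData {
    V1 { seq_number: u64 },
    V2 { seq_number: u64, syncnets: u8 },
}

impl MetaData {
    fn metadata_v1(&self) -> Self {
        match self {
            md @ MetaData::V1 { .. } => md.clone(),
            // Downgrade: the V2-only field is simply dropped.
            MetaData::V2 { seq_number, .. } => MetaData::V1 { seq_number: *seq_number },
        }
    }
    fn metadata_v2(&self) -> Self {
        match self {
            MetaData::V1 { seq_number } => MetaData::V2 {
                seq_number: *seq_number,
                syncnets: 0, // unavailable in V1, filled with the default
            },
            md @ MetaData::V2 { .. } => md.clone(),
        }
    }
}

fn main() {
    let v1 = MetaData::V1 { seq_number: 7 };
    // Round-tripping through V2 and back preserves the V1 data.
    assert_eq!(v1.metadata_v2().metadata_v1(), v1);
}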
@ -208,7 +265,11 @@ impl ssz::Decode for GoodbyeReason {
 }

 /// Request a number of beacon block roots from a peer.
-#[derive(Encode, Decode, Clone, Debug, PartialEq)]
+#[superstruct(
+    variants(V1, V2),
+    variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
+)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct BlocksByRangeRequest {
     /// The starting slot to request blocks.
     pub start_slot: u64,
@ -217,6 +278,17 @@ pub struct BlocksByRangeRequest {
     pub count: u64,
 }

+impl BlocksByRangeRequest {
+    /// The default request is V2
+    pub fn new(start_slot: u64, count: u64) -> Self {
+        Self::V2(BlocksByRangeRequestV2 { start_slot, count })
+    }
+
+    pub fn new_v1(start_slot: u64, count: u64) -> Self {
+        Self::V1(BlocksByRangeRequestV1 { start_slot, count })
+    }
+}
+
 /// Request a number of beacon blobs from a peer.
 #[derive(Encode, Decode, Clone, Debug, PartialEq)]
 pub struct BlobsByRangeRequest {
@ -228,7 +300,11 @@ pub struct BlobsByRangeRequest {
 }

 /// Request a number of beacon block roots from a peer.
-#[derive(Encode, Decode, Clone, Debug, PartialEq)]
+#[superstruct(
+    variants(V1, V2),
+    variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
+)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct OldBlocksByRangeRequest {
     /// The starting slot to request blocks.
     pub start_slot: u64,
@ -244,13 +320,46 @@ pub struct OldBlocksByRangeRequest {
     pub step: u64,
 }

+impl OldBlocksByRangeRequest {
+    /// The default request is V2
+    pub fn new(start_slot: u64, count: u64, step: u64) -> Self {
+        Self::V2(OldBlocksByRangeRequestV2 {
+            start_slot,
+            count,
+            step,
+        })
+    }
+
+    pub fn new_v1(start_slot: u64, count: u64, step: u64) -> Self {
+        Self::V1(OldBlocksByRangeRequestV1 {
+            start_slot,
+            count,
+            step,
+        })
+    }
+}
+
 /// Request a number of beacon block bodies from a peer.
-#[derive(Encode, Decode, Clone, Debug, PartialEq)]
+#[superstruct(
+    variants(V1, V2),
+    variant_attributes(derive(Encode, Decode, Clone, Debug, PartialEq))
+)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct BlocksByRootRequest {
     /// The list of beacon block bodies being requested.
     pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
 }

+impl BlocksByRootRequest {
+    pub fn new(block_roots: VariableList<Hash256, MaxRequestBlocks>) -> Self {
+        Self::V2(BlocksByRootRequestV2 { block_roots })
+    }
+
+    pub fn new_v1(block_roots: VariableList<Hash256, MaxRequestBlocks>) -> Self {
+        Self::V1(BlocksByRootRequestV1 { block_roots })
+    }
+}
+
 /// Request a number of beacon blocks and blobs from a peer.
 #[derive(Clone, Debug, PartialEq)]
 pub struct BlobsByRootRequest {
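After this hunk, callers construct requests through `new` (V2 by default) or `new_v1`, and read fields through superstruct getters instead of direct field access. A runnable miniature of that API shape (simplified, non-generic types):

#[derive(Clone, Debug, PartialEq)]
struct BlocksByRangeRequestV1 {
    start_slot: u64,
    count: u64,
}
#[derive(Clone, Debug, PartialEq)]
struct BlocksByRangeRequestV2 {
    start_slot: u64,
    count: u64,
}

#[derive(Clone, Debug, PartialEq)]
enum BlocksByRangeRequest {
    V1(BlocksByRangeRequestV1),
    V2(BlocksByRangeRequestV2),
}

impl BlocksByRangeRequest {
    /// The default request is V2, as in the diff.
    fn new(start_slot: u64, count: u64) -> Self {
        Self::V2(BlocksByRangeRequestV2 { start_slot, count })
    }
    // superstruct-style getters: one accessor serves both variants.
    fn start_slot(&self) -> &u64 {
        match self {
            Self::V1(r) => &r.start_slot,
            Self::V2(r) => &r.start_slot,
        }
    }
    fn count(&self) -> &u64 {
        match self {
            Self::V1(r) => &r.count,
            Self::V2(r) => &r.count,
        }
    }
}

fn main() {
    let req = BlocksByRangeRequest::new(100, 64);
    assert_eq!(*req.start_slot(), 100);
    assert_eq!(*req.count(), 64);
}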
@ -492,7 +601,12 @@ impl std::fmt::Display for GoodbyeReason {

 impl std::fmt::Display for BlocksByRangeRequest {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count)
+        write!(
+            f,
+            "Start Slot: {}, Count: {}",
+            self.start_slot(),
+            self.count()
+        )
     }
 }
@ -501,7 +615,9 @@ impl std::fmt::Display for OldBlocksByRangeRequest {
         write!(
             f,
             "Start Slot: {}, Count: {}, Step: {}",
-            self.start_slot, self.count, self.step
+            self.start_slot(),
+            self.count(),
+            self.step()
         )
     }
 }
@ -247,7 +247,7 @@ where
             }
             Err(RateLimitedErr::TooLarge) => {
                 // we set the batch sizes, so this is a coding/config err for most protocols
-                let protocol = req.protocol();
+                let protocol = req.versioned_protocol().protocol();
                 if matches!(protocol, Protocol::BlocksByRange) {
                     debug!(self.log, "Blocks by range request will never be processed"; "request" => %req);
                 } else {
@ -335,7 +335,7 @@ where
         serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?;
         let (msg_kind, protocol) = match &self.event {
             Ok(received) => match received {
-                RPCReceived::Request(_, req) => ("request", req.protocol()),
+                RPCReceived::Request(_, req) => ("request", req.versioned_protocol().protocol()),
                 RPCReceived::Response(_, res) => ("response", res.protocol()),
                 RPCReceived::EndOfStream(_, end) => (
                     "end_of_stream",
@ -1,11 +1,8 @@
-use std::marker::PhantomData;
-
 use super::methods::*;
-use super::protocol::Protocol;
 use super::protocol::ProtocolId;
+use super::protocol::SupportedProtocol;
 use super::RPCError;
 use crate::rpc::protocol::Encoding;
-use crate::rpc::protocol::Version;
 use crate::rpc::{
     codec::{base::BaseOutboundCodec, ssz_snappy::SSZSnappyOutboundCodec, OutboundCodec},
     methods::ResponseTermination,
@ -40,9 +37,8 @@ pub enum OutboundRequest<TSpec: EthSpec> {
     BlocksByRoot(BlocksByRootRequest),
     BlobsByRange(BlobsByRangeRequest),
     BlobsByRoot(BlobsByRootRequest),
-    LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
-    MetaData(PhantomData<TSpec>),
+    MetaData(MetadataRequest<TSpec>),
 }

 impl<TSpec: EthSpec> UpgradeInfo for OutboundRequestContainer<TSpec> {
@ -61,46 +57,37 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
         match self {
             // add more protocols when versions/encodings are supported
             OutboundRequest::Status(_) => vec![ProtocolId::new(
-                Protocol::Status,
-                Version::V1,
+                SupportedProtocol::StatusV1,
                 Encoding::SSZSnappy,
             )],
             OutboundRequest::Goodbye(_) => vec![ProtocolId::new(
-                Protocol::Goodbye,
-                Version::V1,
+                SupportedProtocol::GoodbyeV1,
                 Encoding::SSZSnappy,
             )],
             OutboundRequest::BlocksByRange(_) => vec![
-                ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
-                ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlocksByRangeV2, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy),
             ],
             OutboundRequest::BlocksByRoot(_) => vec![
-                ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
-                ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlocksByRootV2, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlocksByRootV1, Encoding::SSZSnappy),
             ],
             OutboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
-                Protocol::BlobsByRange,
-                Version::V1,
+                SupportedProtocol::BlobsByRangeV1,
                 Encoding::SSZSnappy,
             )],
             OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new(
-                Protocol::BlobsByRoot,
-                Version::V1,
+                SupportedProtocol::BlobsByRootV1,
                 Encoding::SSZSnappy,
             )],
             OutboundRequest::Ping(_) => vec![ProtocolId::new(
-                Protocol::Ping,
-                Version::V1,
+                SupportedProtocol::PingV1,
                 Encoding::SSZSnappy,
             )],
             OutboundRequest::MetaData(_) => vec![
-                ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
-                ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::MetaDataV2, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::MetaDataV1, Encoding::SSZSnappy),
             ],
-            // Note: This match arm is technically unreachable as we only respond to light client requests
-            // that we generate from the beacon state.
-            // We do not make light client rpc requests from the beacon node
-            OutboundRequest::LightClientBootstrap(_) => vec![],
         }
     }
     /* These functions are used in the handler for stream management */
@ -110,28 +97,35 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
         match self {
             OutboundRequest::Status(_) => 1,
             OutboundRequest::Goodbye(_) => 0,
-            OutboundRequest::BlocksByRange(req) => req.count,
-            OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
+            OutboundRequest::BlocksByRange(req) => *req.count(),
+            OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
             OutboundRequest::BlobsByRange(req) => req.count * TSpec::max_blobs_per_block() as u64,
             OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
             OutboundRequest::Ping(_) => 1,
             OutboundRequest::MetaData(_) => 1,
-            OutboundRequest::LightClientBootstrap(_) => 1,
         }
     }

-    /// Gives the corresponding `Protocol` to this request.
-    pub fn protocol(&self) -> Protocol {
+    /// Gives the corresponding `SupportedProtocol` to this request.
+    pub fn versioned_protocol(&self) -> SupportedProtocol {
         match self {
-            OutboundRequest::Status(_) => Protocol::Status,
-            OutboundRequest::Goodbye(_) => Protocol::Goodbye,
-            OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
-            OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
-            OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
-            OutboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
-            OutboundRequest::Ping(_) => Protocol::Ping,
-            OutboundRequest::MetaData(_) => Protocol::MetaData,
-            OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
+            OutboundRequest::Status(_) => SupportedProtocol::StatusV1,
+            OutboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1,
+            OutboundRequest::BlocksByRange(req) => match req {
+                OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1,
+                OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2,
+            },
+            OutboundRequest::BlocksByRoot(req) => match req {
+                BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
+                BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
+            },
+            OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
+            OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
+            OutboundRequest::Ping(_) => SupportedProtocol::PingV1,
+            OutboundRequest::MetaData(req) => match req {
+                MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
+                MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2,
+            },
         }
     }
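The key point of `versioned_protocol` is that the request variant alone now determines the negotiated protocol version. A runnable miniature of that mapping, reduced to the BlocksByRange pair:

#[derive(Debug, Clone, Copy, PartialEq)]
enum SupportedProtocol {
    BlocksByRangeV1,
    BlocksByRangeV2,
}

enum BlocksByRangeRequest {
    V1,
    V2,
}

// The request variant alone determines the versioned protocol, as in
// versioned_protocol() above.
fn versioned_protocol(req: &BlocksByRangeRequest) -> SupportedProtocol {
    match req {
        BlocksByRangeRequest::V1 => SupportedProtocol::BlocksByRangeV1,
        BlocksByRangeRequest::V2 => SupportedProtocol::BlocksByRangeV2,
    }
}

fn main() {
    assert_eq!(
        versioned_protocol(&BlocksByRangeRequest::V2),
        SupportedProtocol::BlocksByRangeV2
    );
}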
@ -145,7 +139,6 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
             OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
             OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
-            OutboundRequest::LightClientBootstrap(_) => unreachable!(),
             OutboundRequest::Status(_) => unreachable!(),
             OutboundRequest::Goodbye(_) => unreachable!(),
             OutboundRequest::Ping(_) => unreachable!(),
@ -205,9 +198,6 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
             OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
             OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
-            OutboundRequest::LightClientBootstrap(bootstrap) => {
-                write!(f, "Lightclient Bootstrap: {}", bootstrap.root)
-            }
         }
     }
 }
@ -205,21 +205,6 @@ pub enum Protocol {
     LightClientBootstrap,
 }

-/// RPC Versions
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum Version {
-    /// Version 1 of RPC
-    V1,
-    /// Version 2 of RPC
-    V2,
-}
-
-/// RPC Encondings supported.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum Encoding {
-    SSZSnappy,
-}
-
 impl Protocol {
     pub(crate) fn terminator(self) -> Option<ResponseTermination> {
         match self {
@ -236,6 +221,82 @@ impl Protocol {
     }
 }

+/// RPC Encondings supported.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Encoding {
+    SSZSnappy,
+}
+
+/// All valid protocol name and version combinations.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum SupportedProtocol {
+    StatusV1,
+    GoodbyeV1,
+    BlocksByRangeV1,
+    BlocksByRangeV2,
+    BlocksByRootV1,
+    BlocksByRootV2,
+    BlobsByRangeV1,
+    BlobsByRootV1,
+    PingV1,
+    MetaDataV1,
+    MetaDataV2,
+    LightClientBootstrapV1,
+}
+
+impl SupportedProtocol {
+    pub fn version_string(&self) -> &'static str {
+        match self {
+            SupportedProtocol::StatusV1 => "1",
+            SupportedProtocol::GoodbyeV1 => "1",
+            SupportedProtocol::BlocksByRangeV1 => "1",
+            SupportedProtocol::BlocksByRangeV2 => "2",
+            SupportedProtocol::BlocksByRootV1 => "1",
+            SupportedProtocol::BlocksByRootV2 => "2",
+            SupportedProtocol::BlobsByRangeV1 => "1",
+            SupportedProtocol::BlobsByRootV1 => "1",
+            SupportedProtocol::PingV1 => "1",
+            SupportedProtocol::MetaDataV1 => "1",
+            SupportedProtocol::MetaDataV2 => "2",
+            SupportedProtocol::LightClientBootstrapV1 => "1",
+        }
+    }
+
+    pub fn protocol(&self) -> Protocol {
+        match self {
+            SupportedProtocol::StatusV1 => Protocol::Status,
+            SupportedProtocol::GoodbyeV1 => Protocol::Goodbye,
+            SupportedProtocol::BlocksByRangeV1 => Protocol::BlocksByRange,
+            SupportedProtocol::BlocksByRangeV2 => Protocol::BlocksByRange,
+            SupportedProtocol::BlocksByRootV1 => Protocol::BlocksByRoot,
+            SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot,
+            SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange,
+            SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot,
+            SupportedProtocol::PingV1 => Protocol::Ping,
+            SupportedProtocol::MetaDataV1 => Protocol::MetaData,
+            SupportedProtocol::MetaDataV2 => Protocol::MetaData,
+            SupportedProtocol::LightClientBootstrapV1 => Protocol::LightClientBootstrap,
+        }
+    }
+
+    fn currently_supported() -> Vec<ProtocolId> {
+        vec![
+            ProtocolId::new(Self::StatusV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::GoodbyeV1, Encoding::SSZSnappy),
+            // V2 variants have higher preference then V1
+            ProtocolId::new(Self::BlocksByRangeV2, Encoding::SSZSnappy),
+            ProtocolId::new(Self::BlocksByRangeV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::BlocksByRootV2, Encoding::SSZSnappy),
+            ProtocolId::new(Self::BlocksByRootV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::BlobsByRangeV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::BlobsByRootV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::PingV1, Encoding::SSZSnappy),
+            ProtocolId::new(Self::MetaDataV2, Encoding::SSZSnappy),
+            ProtocolId::new(Self::MetaDataV1, Encoding::SSZSnappy),
+        ]
+    }
+}
+
 impl std::fmt::Display for Encoding {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let repr = match self {
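With `SupportedProtocol` in place, the protocol id string is assembled from one enum value instead of a `(Protocol, Version)` pair. A runnable sketch of that string assembly; the `/eth2/beacon_chain/req` prefix and the `metadata` display name are assumptions about Lighthouse's `PROTOCOL_PREFIX` and `Protocol` formatting:

#[derive(Clone, Copy)]
enum SupportedProtocol {
    MetaDataV1,
    MetaDataV2,
}

impl SupportedProtocol {
    fn protocol_name(self) -> &'static str {
        match self {
            SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 => "metadata",
        }
    }
    fn version_string(self) -> &'static str {
        match self {
            SupportedProtocol::MetaDataV1 => "1",
            SupportedProtocol::MetaDataV2 => "2",
        }
    }
}

fn main() {
    let prefix = "/eth2/beacon_chain/req"; // assumed PROTOCOL_PREFIX value
    let p = SupportedProtocol::MetaDataV2;
    // Mirrors the "{}/{}/{}/{}" format! call in ProtocolId::new below.
    let id = format!("{}/{}/{}/{}", prefix, p.protocol_name(), p.version_string(), "ssz_snappy");
    assert_eq!(id, "/eth2/beacon_chain/req/metadata/2/ssz_snappy");
}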
@ -245,16 +306,6 @@ impl std::fmt::Display for Encoding {
     }
 }

-impl std::fmt::Display for Version {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let repr = match self {
-            Version::V1 => "1",
-            Version::V2 => "2",
-        };
-        f.write_str(repr)
-    }
-}
-
 #[derive(Debug, Clone)]
 pub struct RPCProtocol<TSpec: EthSpec> {
     pub fork_context: Arc<ForkContext>,
@ -269,30 +320,17 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {

     /// The list of supported RPC protocols for Lighthouse.
     fn protocol_info(&self) -> Self::InfoIter {
-        let mut supported_protocols = vec![
-            ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
-            // V2 variants have higher preference then V1
-            ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
-            ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
-        ];
-
+        let mut supported_protocols = SupportedProtocol::currently_supported();
         if let ForkName::Deneb = self.fork_context.current_fork() {
             supported_protocols.extend_from_slice(&[
-                ProtocolId::new(Protocol::BlobsByRoot, Version::V1, Encoding::SSZSnappy),
-                ProtocolId::new(Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy),
+                ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy),
             ]);
         }

         if self.enable_light_client_server {
             supported_protocols.push(ProtocolId::new(
-                Protocol::LightClientBootstrap,
-                Version::V1,
+                SupportedProtocol::LightClientBootstrapV1,
                 Encoding::SSZSnappy,
             ));
         }
@ -322,11 +360,8 @@ impl RpcLimits {
 /// Tracks the types in a protocol id.
 #[derive(Clone, Debug)]
 pub struct ProtocolId {
-    /// The RPC message type/name.
-    pub message_name: Protocol,
-
-    /// The version of the RPC.
-    pub version: Version,
+    /// The protocol name and version
+    pub versioned_protocol: SupportedProtocol,

     /// The encoding of the RPC.
     pub encoding: Encoding,
@ -338,7 +373,7 @@ pub struct ProtocolId {
 impl ProtocolId {
     /// Returns min and max size for messages of given protocol id requests.
     pub fn rpc_request_limits(&self) -> RpcLimits {
-        match self.message_name {
+        match self.versioned_protocol.protocol() {
             Protocol::Status => RpcLimits::new(
                 <StatusMessage as Encode>::ssz_fixed_len(),
                 <StatusMessage as Encode>::ssz_fixed_len(),
@ -347,9 +382,10 @@ impl ProtocolId {
                 <GoodbyeReason as Encode>::ssz_fixed_len(),
                 <GoodbyeReason as Encode>::ssz_fixed_len(),
             ),
+            // V1 and V2 requests are the same
             Protocol::BlocksByRange => RpcLimits::new(
-                <OldBlocksByRangeRequest as Encode>::ssz_fixed_len(),
-                <OldBlocksByRangeRequest as Encode>::ssz_fixed_len(),
+                <OldBlocksByRangeRequestV2 as Encode>::ssz_fixed_len(),
+                <OldBlocksByRangeRequestV2 as Encode>::ssz_fixed_len(),
             ),
             Protocol::BlocksByRoot => {
                 RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
@ -376,7 +412,7 @@ impl ProtocolId {

     /// Returns min and max size for messages of given protocol id responses.
     pub fn rpc_response_limits<T: EthSpec>(&self, fork_context: &ForkContext) -> RpcLimits {
-        match self.message_name {
+        match self.versioned_protocol.protocol() {
             Protocol::Status => RpcLimits::new(
                 <StatusMessage as Encode>::ssz_fixed_len(),
                 <StatusMessage as Encode>::ssz_fixed_len(),
@ -385,10 +421,7 @@ impl ProtocolId {
             Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
             Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
             Protocol::BlobsByRange => RpcLimits::new(*BLOB_SIDECAR_MIN, *BLOB_SIDECAR_MAX),
-            Protocol::BlobsByRoot => {
-                // TODO: wrong too
-                RpcLimits::new(*SIGNED_BLOCK_AND_BLOBS_MIN, *SIGNED_BLOCK_AND_BLOBS_MAX)
-            }
+            Protocol::BlobsByRoot => RpcLimits::new(*BLOB_SIDECAR_MIN, *BLOB_SIDECAR_MAX),
             Protocol::Ping => RpcLimits::new(
                 <Ping as Encode>::ssz_fixed_len(),
                 <Ping as Encode>::ssz_fixed_len(),
@ -407,31 +440,36 @@ impl ProtocolId {
     /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
     /// beginning of the stream, else returns `false`.
     pub fn has_context_bytes(&self) -> bool {
-        match self.message_name {
-            Protocol::BlocksByRange | Protocol::BlocksByRoot => match self.version {
-                Version::V2 => true,
-                Version::V1 => false,
-            },
-            Protocol::LightClientBootstrap => match self.version {
-                Version::V2 | Version::V1 => true,
-            },
-            Protocol::BlobsByRoot | Protocol::BlobsByRange => true,
-            Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false,
+        match self.versioned_protocol {
+            SupportedProtocol::BlocksByRangeV2
+            | SupportedProtocol::BlocksByRootV2
+            | SupportedProtocol::BlobsByRangeV1
+            | SupportedProtocol::BlobsByRootV1
+            | SupportedProtocol::LightClientBootstrapV1 => true,
+            SupportedProtocol::StatusV1
+            | SupportedProtocol::BlocksByRootV1
+            | SupportedProtocol::BlocksByRangeV1
+            | SupportedProtocol::PingV1
+            | SupportedProtocol::MetaDataV1
+            | SupportedProtocol::MetaDataV2
+            | SupportedProtocol::GoodbyeV1 => false,
         }
     }
 }

 /// An RPC protocol ID.
 impl ProtocolId {
-    pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self {
+    pub fn new(versioned_protocol: SupportedProtocol, encoding: Encoding) -> Self {
         let protocol_id = format!(
             "{}/{}/{}/{}",
-            PROTOCOL_PREFIX, message_name, version, encoding
+            PROTOCOL_PREFIX,
+            versioned_protocol.protocol(),
+            versioned_protocol.version_string(),
+            encoding
         );

         ProtocolId {
-            message_name,
-            version,
+            versioned_protocol,
             encoding,
             protocol_id,
         }
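Folding the version into the enum turns `has_context_bytes` into one flat match: V2 block methods and the blob and light-client protocols carry context bytes, everything else does not. A runnable miniature over a subset of the variants:

#[derive(Clone, Copy)]
enum SupportedProtocol {
    BlocksByRangeV1,
    BlocksByRangeV2,
    BlobsByRangeV1,
    PingV1,
}

fn has_context_bytes(p: SupportedProtocol) -> bool {
    // One flat match instead of nested (protocol, version) matches.
    match p {
        SupportedProtocol::BlocksByRangeV2 | SupportedProtocol::BlobsByRangeV1 => true,
        SupportedProtocol::BlocksByRangeV1 | SupportedProtocol::PingV1 => false,
    }
}

fn main() {
    assert!(has_context_bytes(SupportedProtocol::BlocksByRangeV2));
    assert!(!has_context_bytes(SupportedProtocol::BlocksByRangeV1));
}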
@ -464,7 +502,7 @@ where

     fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future {
         async move {
-            let protocol_name = protocol.message_name;
+            let versioned_protocol = protocol.versioned_protocol;
             // convert the socket to tokio compatible socket
             let socket = socket.compat();
             let codec = match protocol.encoding {
@ -483,8 +521,13 @@ where
             let socket = Framed::new(Box::pin(timed_socket), codec);

             // MetaData requests should be empty, return the stream
-            match protocol_name {
-                Protocol::MetaData => Ok((InboundRequest::MetaData(PhantomData), socket)),
+            match versioned_protocol {
+                SupportedProtocol::MetaDataV1 => {
+                    Ok((InboundRequest::MetaData(MetadataRequest::new_v1()), socket))
+                }
+                SupportedProtocol::MetaDataV2 => {
+                    Ok((InboundRequest::MetaData(MetadataRequest::new_v2()), socket))
+                }
                 _ => {
                     match tokio::time::timeout(
                         Duration::from_secs(REQUEST_TIMEOUT),
@ -514,7 +557,7 @@ pub enum InboundRequest<TSpec: EthSpec> {
     BlobsByRoot(BlobsByRootRequest),
     LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
-    MetaData(PhantomData<TSpec>),
+    MetaData(MetadataRequest<TSpec>),
 }

 /// Implements the encoding per supported protocol for `RPCRequest`.
@ -526,8 +569,8 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
         match self {
             InboundRequest::Status(_) => 1,
             InboundRequest::Goodbye(_) => 0,
-            InboundRequest::BlocksByRange(req) => req.count,
-            InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
+            InboundRequest::BlocksByRange(req) => *req.count(),
+            InboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64,
             InboundRequest::BlobsByRange(req) => req.count * TSpec::max_blobs_per_block() as u64,
             InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64,
             InboundRequest::Ping(_) => 1,
@ -536,18 +579,27 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Gives the corresponding `Protocol` to this request.
|
/// Gives the corresponding `SupportedProtocol` to this request.
|
||||||
pub fn protocol(&self) -> Protocol {
|
pub fn versioned_protocol(&self) -> SupportedProtocol {
|
||||||
match self {
|
match self {
|
||||||
InboundRequest::Status(_) => Protocol::Status,
|
InboundRequest::Status(_) => SupportedProtocol::StatusV1,
|
||||||
InboundRequest::Goodbye(_) => Protocol::Goodbye,
|
InboundRequest::Goodbye(_) => SupportedProtocol::GoodbyeV1,
|
||||||
InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
|
InboundRequest::BlocksByRange(req) => match req {
|
||||||
InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
|
OldBlocksByRangeRequest::V1(_) => SupportedProtocol::BlocksByRangeV1,
|
||||||
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
|
OldBlocksByRangeRequest::V2(_) => SupportedProtocol::BlocksByRangeV2,
|
||||||
InboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
|
},
|
||||||
InboundRequest::Ping(_) => Protocol::Ping,
|
InboundRequest::BlocksByRoot(req) => match req {
|
||||||
InboundRequest::MetaData(_) => Protocol::MetaData,
|
BlocksByRootRequest::V1(_) => SupportedProtocol::BlocksByRootV1,
|
||||||
InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
|
BlocksByRootRequest::V2(_) => SupportedProtocol::BlocksByRootV2,
|
||||||
|
},
|
||||||
|
InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1,
|
||||||
|
InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1,
|
||||||
|
InboundRequest::Ping(_) => SupportedProtocol::PingV1,
|
||||||
|
InboundRequest::MetaData(req) => match req {
|
||||||
|
MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1,
|
||||||
|
MetadataRequest::V2(_) => SupportedProtocol::MetaDataV2,
|
||||||
|
},
|
||||||
|
InboundRequest::LightClientBootstrap(_) => SupportedProtocol::LightClientBootstrapV1,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
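Every `SupportedProtocol` still collapses back to an unversioned `Protocol`, which is what the rate limiter below keys on so that V1 and V2 requests share a single quota. A hedged sketch of that inverse mapping (the real `SupportedProtocol::protocol()` lives in `protocol.rs` and is authoritative):

```rust
// Sketch only: both versions of a message map to the same Protocol.
fn protocol_of(p: SupportedProtocol) -> Protocol {
    match p {
        SupportedProtocol::StatusV1 => Protocol::Status,
        SupportedProtocol::GoodbyeV1 => Protocol::Goodbye,
        SupportedProtocol::BlocksByRangeV1 | SupportedProtocol::BlocksByRangeV2 => {
            Protocol::BlocksByRange
        }
        SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRootV2 => {
            Protocol::BlocksByRoot
        }
        SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange,
        SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot,
        SupportedProtocol::PingV1 => Protocol::Ping,
        SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 => Protocol::MetaData,
        SupportedProtocol::LightClientBootstrapV1 => Protocol::LightClientBootstrap,
    }
}
```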
@@ -214,7 +214,7 @@ pub trait RateLimiterItem {

 impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> {
     fn protocol(&self) -> Protocol {
-        self.protocol()
+        self.versioned_protocol().protocol()
     }

     fn expected_responses(&self) -> u64 {
@@ -224,7 +224,7 @@ impl<T: EthSpec> RateLimiterItem for super::InboundRequest<T> {

 impl<T: EthSpec> RateLimiterItem for super::OutboundRequest<T> {
     fn protocol(&self) -> Protocol {
-        self.protocol()
+        self.versioned_protocol().protocol()
     }

     fn expected_responses(&self) -> u64 {
@@ -72,7 +72,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
         request_id: Id,
         req: OutboundRequest<TSpec>,
     ) -> Result<BehaviourAction<Id, TSpec>, Error> {
-        let protocol = req.protocol();
+        let protocol = req.versioned_protocol().protocol();
         // First check that there are not already other requests waiting to be sent.
         if let Some(queued_requests) = self.delayed_requests.get_mut(&(peer_id, protocol)) {
             queued_requests.push_back(QueuedRequest { req, request_id });
@@ -111,7 +111,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
                 event: RPCSend::Request(request_id, req),
             }),
             Err(e) => {
-                let protocol = req.protocol();
+                let protocol = req.versioned_protocol();
                 match e {
                     RateLimitedErr::TooLarge => {
                         // this should never happen with default parameters. Let's just send the request.
@@ -119,7 +119,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
                         crit!(
                             log,
                             "Self rate limiting error for a batch that will never fit. Sending request anyway. Check configuration parameters.";
-                            "protocol" => %req.protocol()
+                            "protocol" => %req.versioned_protocol().protocol()
                         );
                         Ok(BehaviourAction::NotifyHandler {
                             peer_id,
@@ -128,7 +128,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
                         })
                     }
                     RateLimitedErr::TooSoon(wait_time) => {
-                        debug!(log, "Self rate limiting"; "protocol" => %protocol, "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id);
+                        debug!(log, "Self rate limiting"; "protocol" => %protocol.protocol(), "wait_time_ms" => wait_time.as_millis(), "peer_id" => %peer_id);
                         Err((QueuedRequest { req, request_id }, wait_time))
                     }
                 }
@@ -8,7 +8,8 @@ use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
 use crate::rpc::{
     methods::{
         BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
-        OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
+        OldBlocksByRangeRequest, OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2,
+        RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
     },
     OutboundRequest, SubstreamId,
 };
@@ -48,15 +49,26 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
     fn from(req: Request) -> OutboundRequest<TSpec> {
         match req {
             Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r),
-            Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => {
-                OutboundRequest::BlocksByRange(OldBlocksByRangeRequest {
-                    start_slot,
-                    count,
-                    step: 1,
-                })
-            }
+            Request::BlocksByRange(r) => match r {
+                BlocksByRangeRequest::V1(req) => OutboundRequest::BlocksByRange(
+                    OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 {
+                        start_slot: req.start_slot,
+                        count: req.count,
+                        step: 1,
+                    }),
+                ),
+                BlocksByRangeRequest::V2(req) => OutboundRequest::BlocksByRange(
+                    OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 {
+                        start_slot: req.start_slot,
+                        count: req.count,
+                        step: 1,
+                    }),
+                ),
+            },
+            Request::LightClientBootstrap(_) => {
+                unreachable!("Lighthouse never makes an outbound light client request")
+            }
             Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
-            Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
             Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r),
             Request::Status(s) => OutboundRequest::Status(s),
         }
@@ -9,6 +9,7 @@ use crate::peer_manager::{
     ConnectionDirection, PeerManager, PeerManagerEvent,
 };
 use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
+use crate::rpc::methods::MetadataRequest;
 use crate::rpc::*;
 use crate::service::behaviour::BehaviourEvent;
 pub use crate::service::behaviour::Gossipsub;
@@ -37,7 +38,6 @@ use slog::{crit, debug, info, o, trace, warn};
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::{
-    marker::PhantomData,
     sync::Arc,
     task::{Context, Poll},
 };
@@ -949,16 +949,25 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {

     /// Sends a METADATA request to a peer.
     fn send_meta_data_request(&mut self, peer_id: PeerId) {
-        let event = OutboundRequest::MetaData(PhantomData);
+        // We always prefer sending V2 requests
+        let event = OutboundRequest::MetaData(MetadataRequest::new_v2());
         self.eth2_rpc_mut()
             .send_request(peer_id, RequestId::Internal, event);
     }

     /// Sends a METADATA response to a peer.
-    fn send_meta_data_response(&mut self, id: PeerRequestId, peer_id: PeerId) {
-        let event = RPCCodedResponse::Success(RPCResponse::MetaData(
-            self.network_globals.local_metadata.read().clone(),
-        ));
+    fn send_meta_data_response(
+        &mut self,
+        req: MetadataRequest<TSpec>,
+        id: PeerRequestId,
+        peer_id: PeerId,
+    ) {
+        let metadata = self.network_globals.local_metadata.read().clone();
+        let metadata = match req {
+            MetadataRequest::V1(_) => metadata.metadata_v1(),
+            MetadataRequest::V2(_) => metadata,
+        };
+        let event = RPCCodedResponse::Success(RPCResponse::MetaData(metadata));
         self.eth2_rpc_mut().send_response(peer_id, id, event);
     }

@@ -1207,9 +1216,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                         self.pong(peer_request_id, peer_id);
                         None
                     }
-                    InboundRequest::MetaData(_) => {
+                    InboundRequest::MetaData(req) => {
                         // send the requested meta-data
-                        self.send_meta_data_response((handler_id, id), peer_id);
+                        self.send_meta_data_response(req, (handler_id, id), peer_id);
                         None
                     }
                     InboundRequest::Goodbye(reason) => {
|
|||||||
Some(event)
|
Some(event)
|
||||||
}
|
}
|
||||||
InboundRequest::BlocksByRange(req) => {
|
InboundRequest::BlocksByRange(req) => {
|
||||||
let methods::OldBlocksByRangeRequest {
|
|
||||||
start_slot,
|
|
||||||
mut count,
|
|
||||||
step,
|
|
||||||
} = req;
|
|
||||||
// Still disconnect the peer if the request is naughty.
|
// Still disconnect the peer if the request is naughty.
|
||||||
if step == 0 {
|
let mut count = *req.count();
|
||||||
|
if *req.step() == 0 {
|
||||||
self.peer_manager_mut().handle_rpc_error(
|
self.peer_manager_mut().handle_rpc_error(
|
||||||
&peer_id,
|
&peer_id,
|
||||||
Protocol::BlocksByRange,
|
Protocol::BlocksByRange,
|
||||||
@ -1254,14 +1259,18 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
// return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856
|
// return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856
|
||||||
if step > 1 {
|
if *req.step() > 1 {
|
||||||
count = 1;
|
count = 1;
|
||||||
}
|
}
|
||||||
let event = self.build_request(
|
let request = match req {
|
||||||
peer_request_id,
|
methods::OldBlocksByRangeRequest::V1(req) => Request::BlocksByRange(
|
||||||
peer_id,
|
BlocksByRangeRequest::new_v1(req.start_slot, count),
|
||||||
Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }),
|
),
|
||||||
);
|
methods::OldBlocksByRangeRequest::V2(req) => Request::BlocksByRange(
|
||||||
|
BlocksByRangeRequest::new(req.start_slot, count),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
let event = self.build_request(peer_request_id, peer_id, request);
|
||||||
Some(event)
|
Some(event)
|
||||||
}
|
}
|
||||||
InboundRequest::BlocksByRoot(req) => {
|
InboundRequest::BlocksByRoot(req) => {
|
||||||
|
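Usage note: both constructors used above come from this diff; `new_v1` builds the pre-Altair V1 shape while `new` builds the V2 default. A short illustrative call (the values are arbitrary):

```rust
fn example_requests() {
    let v1 = BlocksByRangeRequest::new_v1(100, 64); // start_slot, count
    let v2 = BlocksByRangeRequest::new(100, 64);    // new() yields the V2 variant
    assert_eq!(*v1.count(), *v2.count());
}
```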
@@ -276,9 +276,11 @@ pub(crate) fn save_metadata_to_disk<E: EthSpec>(
     log: &slog::Logger,
 ) {
     let _ = std::fs::create_dir_all(dir);
-    match File::create(dir.join(METADATA_FILENAME))
-        .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes()))
-    {
+    let metadata_bytes = match metadata {
+        MetaData::V1(md) => md.as_ssz_bytes(),
+        MetaData::V2(md) => md.as_ssz_bytes(),
+    };
+    match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) {
         Ok(_) => {
             debug!(log, "Metadata written to disk");
         }
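Because only the variant body is SSZ-encoded, the file carries no version tag; a plausible load path (a hedged sketch with simplified error handling, not necessarily the code Lighthouse ships) would try V2 first and fall back to V1:

```rust
// Sketch of the matching read side for the bytes written above.
fn load_metadata<E: EthSpec>(bytes: &[u8]) -> Option<MetaData<E>> {
    if let Ok(md) = MetaDataV2::<E>::from_ssz_bytes(bytes) {
        return Some(MetaData::V2(md));
    }
    MetaDataV1::<E>::from_ssz_bytes(bytes).ok().map(MetaData::V1)
}
```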
@@ -155,10 +155,7 @@ fn test_blocks_by_range_chunked_rpc() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

         // BlocksByRange Request
-        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
-            start_slot: 0,
-            count: messages_to_send,
-        });
+        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

         let spec = E::default_spec();

@@ -282,10 +279,7 @@ fn test_blocks_by_range_over_limit() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

         // BlocksByRange Request
-        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
-            start_slot: 0,
-            count: messages_to_send,
-        });
+        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

         // BlocksByRange Response
         let full_block = merge_block_large(&common::fork_context(ForkName::Merge));
@@ -367,10 +361,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

         // BlocksByRange Request
-        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
-            start_slot: 0,
-            count: messages_to_send,
-        });
+        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));

         // BlocksByRange Response
         let spec = E::default_spec();
@@ -490,10 +481,7 @@ fn test_blocks_by_range_single_empty_rpc() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

         // BlocksByRange Request
-        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
-            start_slot: 0,
-            count: 10,
-        });
+        let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10));

         // BlocksByRange Response
         let spec = E::default_spec();
@@ -594,16 +582,15 @@ fn test_blocks_by_root_chunked_rpc() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;

         // BlocksByRoot Request
-        let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
-            block_roots: VariableList::from(vec![
+        let rpc_request =
+            Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
-            ]),
-        });
+            ])));

         // BlocksByRoot Response
         let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));
@@ -722,8 +709,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
             common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;

         // BlocksByRoot Request
-        let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
-            block_roots: VariableList::from(vec![
+        let rpc_request =
+            Request::BlocksByRoot(BlocksByRootRequest::new(VariableList::from(vec![
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
@@ -734,8 +721,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
                 Hash256::from_low_u64_be(0),
-            ]),
-        });
+            ])));

         // BlocksByRoot Response
         let full_block = BeaconBlock::Base(BeaconBlockBase::<E>::full(&spec));
|
@ -22,7 +22,7 @@ slot_clock = { path = "../../common/slot_clock" }
|
|||||||
slog = { version = "2.5.2", features = ["max_level_trace", "nested-values"] }
|
slog = { version = "2.5.2", features = ["max_level_trace", "nested-values"] }
|
||||||
hex = "0.4.2"
|
hex = "0.4.2"
|
||||||
ethereum_ssz = "0.5.0"
|
ethereum_ssz = "0.5.0"
|
||||||
ssz_types = "0.5.0"
|
ssz_types = "0.5.3"
|
||||||
futures = "0.3.7"
|
futures = "0.3.7"
|
||||||
error-chain = "0.12.4"
|
error-chain = "0.12.4"
|
||||||
tokio = { version = "1.14.0", features = ["full"] }
|
tokio = { version = "1.14.0", features = ["full"] }
|
||||||
|
@@ -9,8 +9,8 @@ use beacon_chain::{
     observed_operations::ObservationOutcome,
     sync_committee_verification::{self, Error as SyncCommitteeError},
     validator_monitor::get_block_delay_ms,
-    AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized,
-    ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer,
+    AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError,
+    GossipVerifiedBlock, NotifyExecutionLayer,
 };
 use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
 use operation_pool::ReceivedPreCapella;
@@ -756,11 +756,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         let blob_root = verified_blob.block_root();
         let blob_slot = verified_blob.slot();
         let blob_clone = verified_blob.clone().to_blob();
-        match self
-            .chain
-            .process_blob(verified_blob, CountUnrealized::True)
-            .await
-        {
+        match self.chain.process_blob(verified_blob).await {
             Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
                 //TODO(sean) add metrics and logging
                 self.chain.recompute_head_at_current_slot().await;
@@ -978,7 +974,6 @@ impl<T: BeaconChainTypes> Worker<T> {
             | Err(e @ BlockError::NonLinearParentRoots)
             | Err(e @ BlockError::BlockIsNotLaterThanParent { .. })
             | Err(e @ BlockError::InvalidSignature)
-            | Err(e @ BlockError::TooManySkippedSlots { .. })
             | Err(e @ BlockError::WeakSubjectivityConflict)
             | Err(e @ BlockError::InconsistentFork(_))
             | Err(e @ BlockError::ExecutionPayloadError(_))
@@ -1103,12 +1098,7 @@ impl<T: BeaconChainTypes> Worker<T> {

         let result = self
             .chain
-            .process_block(
-                block_root,
-                verified_block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, verified_block, NotifyExecutionLayer::Yes)
             .await;

         match &result {
@@ -1903,7 +1893,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "attn_agg_not_in_committee",
                 );
             }
-            AttnError::AttestationAlreadyKnown { .. } => {
+            AttnError::AttestationSupersetKnown { .. } => {
                 /*
                  * The aggregate attestation has already been observed on the network or in
                  * a block.
@@ -2415,7 +2405,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "sync_bad_aggregator",
                 );
             }
-            SyncCommitteeError::SyncContributionAlreadyKnown(_)
+            SyncCommitteeError::SyncContributionSupersetKnown(_)
             | SyncCommitteeError::AggregatorAlreadyKnown(_) => {
                 /*
                  * The sync committee message already been observed on the network or in
|
@ -5,7 +5,7 @@ use crate::sync::SyncMessage;
|
|||||||
use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped};
|
use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped};
|
||||||
use itertools::process_results;
|
use itertools::process_results;
|
||||||
use lighthouse_network::rpc::methods::{
|
use lighthouse_network::rpc::methods::{
|
||||||
BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS,
|
BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOB_SIDECARS, MAX_REQUEST_BLOCKS_DENEB,
|
||||||
};
|
};
|
||||||
use lighthouse_network::rpc::StatusMessage;
|
use lighthouse_network::rpc::StatusMessage;
|
||||||
use lighthouse_network::rpc::*;
|
use lighthouse_network::rpc::*;
|
||||||
@@ -139,10 +139,10 @@ impl<T: BeaconChainTypes> Worker<T> {
         request_id: PeerRequestId,
         request: BlocksByRootRequest,
     ) {
-        let requested_blocks = request.block_roots.len();
+        let requested_blocks = request.block_roots().len();
         let mut block_stream = match self
             .chain
-            .get_blocks_checking_early_attester_cache(request.block_roots.into(), &executor)
+            .get_blocks_checking_early_attester_cache(request.block_roots().to_vec(), &executor)
         {
             Ok(block_stream) => block_stream,
             Err(e) => return error!(self.log, "Error getting block stream"; "error" => ?e),
@@ -379,23 +379,25 @@ impl<T: BeaconChainTypes> Worker<T> {
     ) {
         debug!(self.log, "Received BlocksByRange Request";
             "peer_id" => %peer_id,
-            "count" => req.count,
-            "start_slot" => req.start_slot,
+            "count" => req.count(),
+            "start_slot" => req.start_slot(),
         );

         // Should not send more than max request blocks
-        if req.count > MAX_REQUEST_BLOCKS {
+        // TODO: We should switch the limit to `MAX_REQUEST_BLOCKS` at the fork,
+        // or maybe consider switching the max value given the fork context.
+        if *req.count() > MAX_REQUEST_BLOCKS_DENEB {
             return self.send_error_response(
                 peer_id,
                 RPCResponseErrorCode::InvalidRequest,
-                "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`".into(),
+                "Request exceeded `MAX_REQUEST_BLOCKS_DENEB`".into(),
                 request_id,
             );
         }

         let forwards_block_root_iter = match self
             .chain
-            .forwards_iter_block_roots(Slot::from(req.start_slot))
+            .forwards_iter_block_roots(Slot::from(*req.start_slot()))
         {
             Ok(iter) => iter,
             Err(BeaconChainError::HistoricalBlockError(
@@ -432,7 +434,7 @@ impl<T: BeaconChainTypes> Worker<T> {

         // Pick out the required blocks, ignoring skip-slots.
         let mut last_block_root = req
-            .start_slot
+            .start_slot()
             .checked_sub(1)
             .map(|prev_slot| {
                 self.chain
@@ -443,7 +445,9 @@ impl<T: BeaconChainTypes> Worker<T> {
             .flatten()
             .flatten();
         let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
-            iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
+            iter.take_while(|(_, slot)| {
+                slot.as_u64() < req.start_slot().saturating_add(*req.count())
+            })
                 // map skip slots to None
                 .map(|(root, _)| {
                     let result = if Some(root) == last_block_root {
@@ -487,8 +491,8 @@ impl<T: BeaconChainTypes> Worker<T> {
                 Ok(Some(block)) => {
                     // Due to skip slots, blocks could be out of the range, we ensure they
                     // are in the range before sending
-                    if block.slot() >= req.start_slot
-                        && block.slot() < req.start_slot + req.count
+                    if block.slot() >= *req.start_slot()
+                        && block.slot() < req.start_slot() + req.count()
                     {
                         blocks_sent += 1;
                         self.send_network_message(NetworkMessage::SendResponse {
@@ -572,15 +576,15 @@ impl<T: BeaconChainTypes> Worker<T> {
             .slot()
             .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot());

-        if blocks_sent < (req.count as usize) {
+        if blocks_sent < (*req.count() as usize) {
             debug!(
                 self.log,
                 "BlocksByRange outgoing response processed";
                 "peer" => %peer_id,
                 "msg" => "Failed to return all requested blocks",
-                "start_slot" => req.start_slot,
+                "start_slot" => req.start_slot(),
                 "current_slot" => current_slot,
-                "requested" => req.count,
+                "requested" => req.count(),
                 "returned" => blocks_sent
             );
         } else {
@@ -588,9 +592,9 @@ impl<T: BeaconChainTypes> Worker<T> {
                 self.log,
                 "BlocksByRange outgoing response processed";
                 "peer" => %peer_id,
-                "start_slot" => req.start_slot,
+                "start_slot" => req.start_slot(),
                 "current_slot" => current_slot,
-                "requested" => req.count,
+                "requested" => req.count(),
                 "returned" => blocks_sent
             );
         }
@@ -10,12 +10,12 @@ use crate::sync::{BatchProcessResult, ChainId};
 use beacon_chain::blob_verification::BlockWrapper;
 use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock};
 use beacon_chain::data_availability_checker::AvailabilityCheckError;
+use beacon_chain::AvailabilityProcessingStatus;
 use beacon_chain::{
     observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
     NotifyExecutionLayer,
 };
-use beacon_chain::{AvailabilityProcessingStatus, CountUnrealized};
 use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
@@ -28,7 +28,7 @@ use types::{Epoch, Hash256};
 #[derive(Clone, Debug, PartialEq)]
 pub enum ChainSegmentProcessId {
     /// Processing Id of a range syncing batch.
-    RangeBatchId(ChainId, Epoch, CountUnrealized),
+    RangeBatchId(ChainId, Epoch),
     /// Processing ID for a backfill syncing batch.
     BackSyncBatchId(Epoch),
     /// Processing Id of the parent lookup of a block.
@@ -171,12 +171,7 @@ impl<T: BeaconChainTypes> Worker<T> {

         let result = self
             .chain
-            .process_block(
-                block_root,
-                block,
-                CountUnrealized::True,
-                NotifyExecutionLayer::Yes,
-            )
+            .process_block(block_root, block, NotifyExecutionLayer::Yes)
             .await;

         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -233,15 +228,11 @@ impl<T: BeaconChainTypes> Worker<T> {

         let result = self
             .chain
-            .check_availability_and_maybe_import(
-                slot,
-                |chain| {
-                    chain
-                        .data_availability_checker
-                        .put_rpc_blobs(block_root, blobs)
-                },
-                CountUnrealized::True,
-            )
+            .check_availability_and_maybe_import(slot, |chain| {
+                chain
+                    .data_availability_checker
+                    .put_rpc_blobs(block_root, blobs)
+            })
             .await;

         // Sync handles these results
@@ -262,17 +253,13 @@ impl<T: BeaconChainTypes> Worker<T> {
     ) {
         let result = match sync_type {
             // this a request from the range sync
-            ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => {
+            ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
                 let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
                 let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
                 let sent_blocks = downloaded_blocks.len();

                 match self
-                    .process_blocks(
-                        downloaded_blocks.iter(),
-                        count_unrealized,
-                        notify_execution_layer,
-                    )
+                    .process_blocks(downloaded_blocks.iter(), notify_execution_layer)
                     .await
                 {
                     (_, Ok(_)) => {
@@ -357,11 +344,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
                 match self
-                    .process_blocks(
-                        downloaded_blocks.iter().rev(),
-                        CountUnrealized::True,
-                        notify_execution_layer,
-                    )
+                    .process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer)
                     .await
                 {
                     (imported_blocks, Err(e)) => {
@@ -391,13 +374,12 @@ impl<T: BeaconChainTypes> Worker<T> {
     async fn process_blocks<'a>(
         &self,
         downloaded_blocks: impl Iterator<Item = &'a BlockWrapper<T::EthSpec>>,
-        count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
         let blocks: Vec<_> = downloaded_blocks.cloned().collect();
         match self
             .chain
-            .process_chain_segment(blocks, count_unrealized, notify_execution_layer)
+            .process_chain_segment(blocks, notify_execution_layer)
             .await
         {
             ChainSegmentResult::Successful { imported_blocks } => {
|
@ -491,9 +491,9 @@ impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS
|
|||||||
self.block_request_state.state.state,
|
self.block_request_state.state.state,
|
||||||
State::AwaitingDownload
|
State::AwaitingDownload
|
||||||
));
|
));
|
||||||
let request = BlocksByRootRequest {
|
let request = BlocksByRootRequest::new(VariableList::from(vec![
|
||||||
block_roots: VariableList::from(vec![self.block_request_state.requested_block_root]),
|
self.block_request_state.requested_block_root,
|
||||||
};
|
]));
|
||||||
let response_type = ResponseType::Block;
|
let response_type = ResponseType::Block;
|
||||||
if self.too_many_attempts(response_type) {
|
if self.too_many_attempts(response_type) {
|
||||||
Err(LookupRequestError::TooManyAttempts {
|
Err(LookupRequestError::TooManyAttempts {
|
||||||
|
@ -707,7 +707,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
|||||||
.parent_block_processed(chain_hash, result, response_type, &mut self.network),
|
.parent_block_processed(chain_hash, result, response_type, &mut self.network),
|
||||||
},
|
},
|
||||||
SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
|
SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
|
||||||
ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
|
ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => {
|
||||||
self.range_sync.handle_block_process_result(
|
self.range_sync.handle_block_process_result(
|
||||||
&mut self.network,
|
&mut self.network,
|
||||||
chain_id,
|
chain_id,
|
||||||
|
@ -162,7 +162,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending BlocksByRange request";
|
"Sending BlocksByRange request";
|
||||||
"method" => "BlocksByRange",
|
"method" => "BlocksByRange",
|
||||||
"count" => request.count,
|
"count" => request.count(),
|
||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
);
|
);
|
||||||
let request = Request::BlocksByRange(request);
|
let request = Request::BlocksByRange(request);
|
||||||
@ -181,7 +181,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending BlocksByRange and BlobsByRange requests";
|
"Sending BlocksByRange and BlobsByRange requests";
|
||||||
"method" => "Mixed by range request",
|
"method" => "Mixed by range request",
|
||||||
"count" => request.count,
|
"count" => request.count(),
|
||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -191,8 +191,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
|
|
||||||
// Create the blob request based on the blob request.
|
// Create the blob request based on the blob request.
|
||||||
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
||||||
start_slot: request.start_slot,
|
start_slot: *request.start_slot(),
|
||||||
count: request.count,
|
count: *request.count(),
|
||||||
});
|
});
|
||||||
let blocks_request = Request::BlocksByRange(request);
|
let blocks_request = Request::BlocksByRange(request);
|
||||||
|
|
||||||
@ -235,7 +235,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending backfill BlocksByRange request";
|
"Sending backfill BlocksByRange request";
|
||||||
"method" => "BlocksByRange",
|
"method" => "BlocksByRange",
|
||||||
"count" => request.count,
|
"count" => request.count(),
|
||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
);
|
);
|
||||||
let request = Request::BlocksByRange(request);
|
let request = Request::BlocksByRange(request);
|
||||||
@ -254,7 +254,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending backfill BlocksByRange and BlobsByRange requests";
|
"Sending backfill BlocksByRange and BlobsByRange requests";
|
||||||
"method" => "Mixed by range request",
|
"method" => "Mixed by range request",
|
||||||
"count" => request.count,
|
"count" => request.count(),
|
||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -264,8 +264,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
|
|
||||||
// Create the blob request based on the blob request.
|
// Create the blob request based on the blob request.
|
||||||
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
||||||
start_slot: request.start_slot,
|
start_slot: *request.start_slot(),
|
||||||
count: request.count,
|
count: *request.count(),
|
||||||
});
|
});
|
||||||
let blocks_request = Request::BlocksByRange(request);
|
let blocks_request = Request::BlocksByRange(request);
|
||||||
|
|
||||||
@ -422,7 +422,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending BlocksByRoot Request";
|
"Sending BlocksByRoot Request";
|
||||||
"method" => "BlocksByRoot",
|
"method" => "BlocksByRoot",
|
||||||
"count" => request.block_roots.len(),
|
"count" => request.block_roots().len(),
|
||||||
"peer" => %peer_id
|
"peer" => %peer_id
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -472,7 +472,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.log,
|
self.log,
|
||||||
"Sending parent BlocksByRoot Request";
|
"Sending parent BlocksByRoot Request";
|
||||||
"method" => "BlocksByRoot",
|
"method" => "BlocksByRoot",
|
||||||
"count" => request.block_roots.len(),
|
"count" => request.block_roots().len(),
|
||||||
"peer" => %peer_id
|
"peer" => %peer_id
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -220,10 +220,10 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
|
|||||||
/// Returns a BlocksByRange request associated with the batch.
|
/// Returns a BlocksByRange request associated with the batch.
|
||||||
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) {
|
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) {
|
||||||
(
|
(
|
||||||
BlocksByRangeRequest {
|
BlocksByRangeRequest::new(
|
||||||
start_slot: self.start_slot.into(),
|
self.start_slot.into(),
|
||||||
count: self.end_slot.sub(self.start_slot).into(),
|
self.end_slot.sub(self.start_slot).into(),
|
||||||
},
|
),
|
||||||
self.batch_type,
|
self.batch_type,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
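Worked example (hedged, mainnet parameters): with exact epoch boundaries and one epoch per batch, a batch for epoch 10 has `start_slot = 320` and `end_slot = 352`, so the request built above covers slots `320..352`:

```rust
fn example_batch_bounds() -> (u64, u64) {
    let slots_per_epoch: u64 = 32; // mainnet assumption
    let epoch: u64 = 10;
    let start_slot = epoch * slots_per_epoch; // 320
    let count = slots_per_epoch;              // end_slot - start_slot = 32
    (start_slot, count)
}
```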
|
@ -4,7 +4,7 @@ use crate::sync::{
|
|||||||
manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
|
manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
|
||||||
};
|
};
|
||||||
use beacon_chain::blob_verification::BlockWrapper;
|
use beacon_chain::blob_verification::BlockWrapper;
|
||||||
use beacon_chain::{BeaconChainTypes, CountUnrealized};
|
use beacon_chain::BeaconChainTypes;
|
||||||
use fnv::FnvHashMap;
|
use fnv::FnvHashMap;
|
||||||
use lighthouse_network::{PeerAction, PeerId};
|
use lighthouse_network::{PeerAction, PeerId};
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
@ -101,8 +101,6 @@ pub struct SyncingChain<T: BeaconChainTypes> {
|
|||||||
/// Batches validated by this chain.
|
/// Batches validated by this chain.
|
||||||
validated_batches: u64,
|
validated_batches: u64,
|
||||||
|
|
||||||
is_finalized_segment: bool,
|
|
||||||
|
|
||||||
/// The chain's log.
|
/// The chain's log.
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
}
|
}
|
||||||
@ -128,7 +126,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
target_head_slot: Slot,
|
target_head_slot: Slot,
|
||||||
target_head_root: Hash256,
|
target_head_root: Hash256,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
is_finalized_segment: bool,
|
|
||||||
log: &slog::Logger,
|
log: &slog::Logger,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let mut peers = FnvHashMap::default();
|
let mut peers = FnvHashMap::default();
|
||||||
@ -136,16 +133,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
|
|
||||||
let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
|
let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
|
||||||
|
|
||||||
let target_slot = if is_finalized_segment {
|
|
||||||
target_head_slot + (2 * T::EthSpec::slots_per_epoch()) + 1
|
|
||||||
} else {
|
|
||||||
target_head_slot
|
|
||||||
};
|
|
||||||
|
|
||||||
SyncingChain {
|
SyncingChain {
|
||||||
id,
|
id,
|
||||||
start_epoch,
|
start_epoch,
|
||||||
target_head_slot: target_slot,
|
target_head_slot,
|
||||||
target_head_root,
|
target_head_root,
|
||||||
batches: BTreeMap::new(),
|
batches: BTreeMap::new(),
|
||||||
peers,
|
peers,
|
||||||
@ -156,7 +147,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
state: ChainSyncingState::Stopped,
|
state: ChainSyncingState::Stopped,
|
||||||
current_processing_batch: None,
|
current_processing_batch: None,
|
||||||
validated_batches: 0,
|
validated_batches: 0,
|
||||||
is_finalized_segment,
|
|
||||||
log: log.new(o!("chain" => id)),
|
log: log.new(o!("chain" => id)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -324,12 +314,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
// for removing chains and checking completion is in the callback.
|
// for removing chains and checking completion is in the callback.
|
||||||
|
|
||||||
let blocks = batch.start_processing()?;
|
let blocks = batch.start_processing()?;
|
||||||
let count_unrealized = if self.is_finalized_segment {
|
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id);
|
||||||
CountUnrealized::False
|
|
||||||
} else {
|
|
||||||
CountUnrealized::True
|
|
||||||
};
|
|
||||||
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
|
|
||||||
self.current_processing_batch = Some(batch_id);
|
self.current_processing_batch = Some(batch_id);
|
||||||
|
|
||||||
let work_event = BeaconWorkEvent::chain_segment(process_id, blocks);
|
let work_event = BeaconWorkEvent::chain_segment(process_id, blocks);
|
||||||
|
@@ -465,10 +465,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
         network: &mut SyncNetworkContext<T>,
     ) {
         let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
-        let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type {
-            (&mut self.finalized_chains, true)
+        let collection = if let RangeSyncType::Finalized = sync_type {
+            &mut self.finalized_chains
         } else {
-            (&mut self.head_chains, false)
+            &mut self.head_chains
         };
         match collection.entry(id) {
             Entry::Occupied(mut entry) => {
@@ -493,7 +493,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
                     target_head_slot,
                     target_head_root,
                     peer,
-                    is_finalized,
                     &self.log,
                 );
                 debug_assert_eq!(new_chain.get_id(), id);
@@ -142,13 +142,20 @@ where
                 debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id);
                 self.awaiting_head_peers.remove(&peer_id);

+                // Because of our change in finalized sync batch size from 2 to 1 and our transition
+                // to using exact epoch boundaries for batches (rather than one slot past the epoch
+                // boundary), we need to sync finalized sync to 2 epochs + 1 slot past our peer's
+                // finalized slot in order to finalize the chain locally.
+                let target_head_slot =
+                    remote_finalized_slot + (2 * T::EthSpec::slots_per_epoch()) + 1;
+
                 // Note: We keep current head chains. These can continue syncing whilst we complete
                 // this new finalized chain.

                 self.chains.add_peer_or_create_chain(
                     local_info.finalized_epoch,
                     remote_info.finalized_root,
-                    remote_finalized_slot,
+                    target_head_slot,
                     peer_id,
                     RangeSyncType::Finalized,
                     network,
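Worked example (hedged, mainnet's 32 slots per epoch): a peer finalized at slot 6400 produces a target of `6400 + 2 * 32 + 1 = 6465`, i.e. one slot past two full epochs beyond the peer's finalized slot:

```rust
fn example_target_head_slot() -> u64 {
    let remote_finalized_slot: u64 = 6400;
    let slots_per_epoch: u64 = 32; // mainnet assumption
    remote_finalized_slot + (2 * slots_per_epoch) + 1 // = 6465
}
```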
@@ -123,7 +123,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .value_name("PORT")
                 .help("The UDP port that discovery will listen on over IpV6 if listening over \
                     both Ipv4 and IpV6. Defaults to `port6`")
-                .hidden(true) // TODO: implement dual stack via two sockets in discv5.
                 .takes_value(true),
         )
         .arg(
@@ -205,7 +204,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                     discovery. Set this only if you are sure other nodes can connect to your \
                     local node on this address. This will update the `ip4` or `ip6` ENR fields \
                     accordingly. To update both, set this flag twice with the different values.")
-                .requires("enr-udp-port")
                 .multiple(true)
                 .max_values(2)
                 .takes_value(true),
@@ -729,7 +727,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             Arg::with_name("max-skip-slots")
                 .long("max-skip-slots")
                 .help(
-                    "Refuse to skip more than this many slots when processing a block or attestation. \
+                    "Refuse to skip more than this many slots when processing an attestation. \
                     This prevents nodes on minority forks from wasting our time and disk space, \
                     but could also cause unnecessary consensus failures, so is disabled by default."
                 )
@@ -37,7 +37,7 @@
 \rput[bl](9.0,-3.49){27.3 hours}
 \rput[bl](8.8,-5.49){Varying time}
 \rput[bl](8.7,-5.99){validator sweep}
-\rput[bl](8.9,-6.59){up to 5 days}
+\rput[bl](8.9,-6.59){up to \textit{n} days}
 \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89)
 \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89)
 \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29)
@@ -31,7 +31,7 @@
 \rput[bl](0.9,-1.59){Beacon chain}
 \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09)
 \rput[bl](7.6,-3.99){validator sweep}
-\rput[bl](7.5,-4.69){$\sim$ every 5 days}
+\rput[bl](7.82,-4.73){every \textit{n} days}
 \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09)
 \rput[bl](1.3,-4.09){BLS to}
 \rput[bl](0.5,-4.69){execution change}
@@ -47,7 +47,7 @@
 * [Running a Slasher](./slasher.md)
 * [Redundancy](./redundancy.md)
 * [Release Candidates](./advanced-release-candidates.md)
-* [Maximal Extractable Value (MEV)](./builders.md)
+* [MEV](./builders.md)
 * [Merge Migration](./merge-migration.md)
 * [Late Block Re-orgs](./late-block-re-orgs.md)
 * [Contributing](./contributing.md)
@@ -28,7 +28,7 @@ some example values.
 | Research | 32 | 3.4 TB | 155 ms |
 | Block explorer/analysis | 128 | 851 GB | 620 ms |
 | Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s |
-| EHobbyist | 4096 | 26.8 GB | 20.5 s |
+| Hobbyist | 4096 | 26.8 GB | 20.5 s |
 | Validator only (default) | 8192 | 8.1 GB | 41 s |

 *Last update: May 2023.
@ -38,7 +38,6 @@ large peer count will not speed up sync.
|
|||||||
For these reasons, we recommend users do not modify the `--target-peers` count
|
For these reasons, we recommend users do not modify the `--target-peers` count
|
||||||
drastically and use the (recommended) default.
|
drastically and use the (recommended) default.
|
||||||
|
|
||||||
|
|
||||||
### NAT Traversal (Port Forwarding)
|
### NAT Traversal (Port Forwarding)
|
||||||
|
|
||||||
Lighthouse, by default, uses port 9000 for both TCP and UDP. Lighthouse will
|
Lighthouse, by default, uses port 9000 for both TCP and UDP. Lighthouse will
|
||||||
@@ -107,3 +106,78 @@ Modifying the ENR settings can degrade the discovery of your node, making it
 harder for peers to find you or potentially making it harder for other peers to
 find each other. We recommend not touching these settings unless for a more
 advanced use case.
+
+### IPv6 support
+
+As noted in the previous sections, two fundamental parts to ensure good
+connectivity are: the parameters that configure the sockets over which
+Lighthouse listens for connections, and the parameters used to tell other peers
+how to connect to your node. This distinction is relevant and applies to most
+nodes that do not run directly on a public network.
+
+#### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack
+
+To listen over only IPv6, use the same parameters as when listening over
+IPv4 only:
+
+- `--listen-addresses :: --port 9909` will listen over IPv6 using port `9909` for
+  TCP and UDP.
+- `--listen-addresses :: --port 9909 --discovery-port 9999` will listen over
+  IPv6 using port `9909` for TCP and port `9999` for UDP.
+
+To listen over both IPv4 and IPv6:
+- Set two listening addresses using the `--listen-addresses` flag twice, ensuring
+  that one address is IPv4 and the other IPv6. When doing so, the
+  `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note
+  that this behaviour differs from the IPv6-only case described above.
+- If necessary, set the `--port6` flag to configure the port used for TCP and
+  UDP over IPv6. This flag has no effect when listening over IPv6 only.
+- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP
+  port. This will default to the value given to `--port6` if not set. This flag
+  has no effect when listening over IPv6 only.
+
+##### Configuration Examples
+
+- `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909` will listen
+  over IPv4 using port `9909` for TCP and UDP. It will also listen over IPv6 but
+  using the default value for `--port6` for UDP and TCP (`9090`).
+- `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909 --discovery-port6 9999`
+  will have the same configuration as before except for the IPv6 UDP socket,
+  which will use port `9999`.
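The dual-stack port rules above are easy to get wrong, so here is a minimal, self-contained sketch (not Lighthouse's actual configuration code) of how the documented flags resolve, assuming the stated defaults: `--port6` defaults to 9090 and `--discovery-port6` falls back to `--port6`.

```rust
/// Hypothetical model of the documented flag behaviour in dual-stack mode.
struct PortFlags {
    port: u16,                    // --port (IPv4 TCP + UDP)
    discovery_port: Option<u16>,  // --discovery-port (IPv4 UDP override)
    port6: Option<u16>,           // --port6 (IPv6 TCP + UDP)
    discovery_port6: Option<u16>, // --discovery-port6 (IPv6 UDP override)
}

/// Returns ((v4_tcp, v4_udp), (v6_tcp, v6_udp)).
fn resolve_dual_stack(f: &PortFlags) -> ((u16, u16), (u16, u16)) {
    let v4_tcp = f.port;
    let v4_udp = f.discovery_port.unwrap_or(f.port);
    // --port and --discovery-port apply exclusively to IPv4, so IPv6 falls
    // back to --port6 (default 9090), then --discovery-port6 for UDP.
    let v6_tcp = f.port6.unwrap_or(9090);
    let v6_udp = f.discovery_port6.or(f.port6).unwrap_or(9090);
    ((v4_tcp, v4_udp), (v6_tcp, v6_udp))
}

fn main() {
    // Mirrors `--listen-addresses :: --listen-addresses 0.0.0.0 --port 9909 --discovery-port6 9999`
    let flags = PortFlags { port: 9909, discovery_port: None, port6: None, discovery_port6: Some(9999) };
    println!("{:?}", resolve_dual_stack(&flags)); // ((9909, 9909), (9090, 9999))
}
```

Running the sketch on the second configuration example reproduces the documented outcome: IPv4 on 9909, IPv6 TCP on the 9090 default, IPv6 UDP on 9999.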
+#### Configuring Lighthouse to advertise IPv6 reachable addresses
+Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively,
+and dual stack using one socket for IPv6 and another socket for IPv4. In both
+scenarios, the previous sections still apply. In summary:
+
+> Beacon nodes must advertise their publicly reachable socket address
+
+In order to do so, Lighthouse provides the following CLI options/parameters.
+
+- `--enr-udp-port` Use this to advertise the port that is publicly reachable
+  over UDP with a publicly reachable IPv4 address. This might differ from the
+  IPv4 port used to listen.
+- `--enr-udp6-port` Use this to advertise the port that is publicly reachable
+  over UDP with a publicly reachable IPv6 address. This might differ from the
+  IPv6 port used to listen.
+- `--enr-tcp-port` Use this to advertise the port that is publicly reachable
+  over TCP with a publicly reachable IPv4 address. This might differ from the
+  IPv4 port used to listen.
+- `--enr-tcp6-port` Use this to advertise the port that is publicly reachable
+  over TCP with a publicly reachable IPv6 address. This might differ from the
+  IPv6 port used to listen.
+- `--enr-addresses` Use this to advertise publicly reachable addresses. Takes at
+  most two values, one for IPv4 and one for IPv6. Note that a beacon node that
+  advertises some address must be reachable over both UDP and TCP.
+
+In the general case, a user will not need to set these explicitly. Update
+these options only if you can guarantee your node is reachable with these
+values.
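One way to sanity-check what the node is actually advertising is the standard beacon API identity endpoint, `/eth/v1/node/identity`. A sketch, assuming the HTTP API is enabled on the default port 5052 and that `reqwest` (with the `blocking` and `json` features) plus `serde_json` are available:

```rust
// Fetch the node's identity and print the ENR it currently advertises,
// so the --enr-* flags above can be verified against reality.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body: serde_json::Value =
        reqwest::blocking::get("http://localhost:5052/eth/v1/node/identity")?.json()?;
    println!("enr: {}", body["data"]["enr"]);
    println!("p2p_addresses: {}", body["data"]["p2p_addresses"]);
    Ok(())
}
```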
+#### Known caveats
+
+IPv6 link-local addresses are likely to have poor connectivity if used in
+topologies with more than one interface. Use global addresses for the general
+case.
@@ -426,7 +426,8 @@ Example Response Body
 
 ## `PATCH /lighthouse/validators/:voting_pubkey`
 
-Update some values for the validator with `voting_pubkey`. The following example updates a validator from `enabled: true` to `enabled: false`
+Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`,
+and `graffiti`. The following example updates a validator from `enabled: true` to `enabled: false`.
 
 ### HTTP Specification
 
@@ -108,13 +108,14 @@ Command:
 ```bash
 DATADIR=/var/lib/lighthouse
 curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \
--H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)" \
+-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \
 -H "Content-Type: application/json" \
 -d '{
     "builder_proposals": true,
     "gas_limit": 30000001
 }' | jq
 ```
+If you are having a permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`
 
 #### Example Response Body
 
@@ -25,10 +25,11 @@
 ## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1)
 - [I have a low peer count and it is not increasing](#net-peer)
 - [How do I update lighthouse?](#net-update)
-- [Do I need to set up any port mappings (port forwarding)?](#net-port)
+- [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding)
 - [How can I monitor my validators?](#net-monitor)
 - [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc)
 - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip)
+- [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port)
 
 
 ## [Miscellaneous](#miscellaneous-1)
@@ -360,7 +361,7 @@ $ docker pull sigp/lighthouse:v1.0.0
 If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image)
 You just need to make sure the code you have checked out is up to date.
 
-### <a name="net-port"></a> Do I need to set up any port mappings (port forwarding)?
+### <a name="net-port-forwarding"></a> Do I need to set up any port mappings (port forwarding)?
 
 It is not strictly required to open any ports for Lighthouse to connect and
 participate in the network. Lighthouse should work out-of-the-box. However, if
|
|||||||
|
|
||||||
Lighthouse supports UPnP. If you are behind a NAT with a router that supports
|
Lighthouse supports UPnP. If you are behind a NAT with a router that supports
|
||||||
UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its
|
UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its
|
||||||
initial logs if a route has been established). You can also manually [set up port mappings](./advanced_networking.md) in your router to your local Lighthouse instance. By default,
|
initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md#how-to-open-ports) in your router to your local Lighthouse instance. By default,
|
||||||
Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will
|
Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will
|
||||||
make your Lighthouse node maximally contactable.
|
make your Lighthouse node maximally contactable.
|
||||||
|
|
||||||
@@ -421,6 +422,9 @@ The settings are as follows:
 ### <a name="net-ip"></a> Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?
 No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (this flag is not used in common use cases).
 
+### <a name="net-port"></a> How to change the TCP/UDP port 9000 that Lighthouse listens on?
+Use the flag `--port <PORT>` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node on the default port 9000, and configure the second beacon node to listen on, e.g., `--port 9001`.
+
 ## Miscellaneous
 
 ### <a name="misc-slashing"></a> What should I do if I lose my slashing protection database?
@@ -29,6 +29,8 @@ Lighthouse will first search for the graffiti corresponding to the public key of
 ### 2. Setting the graffiti in the `validator_definitions.yml`
 Users can set validator-specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal.
 
+You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http).
+
 Below is an example of the validator_definitions.yml with validator-specific graffitis:
 ```
 ---
|
|||||||
> 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client.
|
> 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client.
|
||||||
> 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node.
|
> 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node.
|
||||||
> 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti.
|
> 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti.
|
||||||
|
|
||||||
|
### Set Graffiti via HTTP
|
||||||
|
|
||||||
|
Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti
|
||||||
|
both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal
|
||||||
|
without requiring a validator client restart.
|
||||||
|
|
||||||
|
Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification.
|
||||||
|
|
||||||
|
#### Example Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
DATADIR=/var/lib/lighthouse
|
||||||
|
curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \
|
||||||
|
-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"graffiti": "Mr F was here"
|
||||||
|
}' | jq
|
||||||
|
```
|
||||||
|
|
||||||
|
A `null` response indicates that the request is successful.
|
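For completeness, the same PATCH request as a Rust sketch rather than curl, assuming the blocking `reqwest` client (with the `json` feature) and `serde_json`; the datadir path and pubkey are the ones from the example above:

```rust
// Set a validator's graffiti via the VC HTTP API, mirroring the curl example.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let token = std::fs::read_to_string("/var/lib/lighthouse/validators/api-token.txt")?;
    let pubkey = "0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde";
    let client = reqwest::blocking::Client::new();
    let resp = client
        .patch(format!("http://localhost:5062/lighthouse/validators/{pubkey}"))
        .bearer_auth(token.trim())
        .json(&serde_json::json!({ "graffiti": "Mr F was here" }))
        .send()?;
    // A `null` body indicates success, as with the curl example.
    println!("{}: {}", resp.status(), resp.text()?);
    Ok(())
}
```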
Binary file not shown (image, 257 KiB).
Binary file not shown (image, 172 KiB).
@@ -16,7 +16,7 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12<sup>
 
 3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`?
 
-No. The "validator sweep" occurs automatically and you can expect to receive the rewards every few days.
+No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01).
 
 The figure below summarizes partial withdrawals.
 
@@ -43,13 +43,12 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign
 Staying silent and refusing to sign messages will cause the following:
 
 - 2-3 missed attestations, incurring penalties and missed rewards.
-- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards.
 - Potentially missed rewards by missing a block proposal (if the validator is an elected block
   proposer, which is unlikely).
 
 The loss of rewards and penalties incurred due to the missed duties will be very small in
-dollar-values. Generally, they will equate to around one US dollar (at August 2021 figures) or about
-2% of the reward for one validator for one day. Since DP costs so little but can protect a user from
+dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than
+1% of the reward for one validator for one day. Since DP costs so little but can protect a user from
 slashing, many users will consider this a worthwhile trade-off.
 
 The 2-3 epochs of missed duties will be incurred whenever the VC is started (e.g., after an update
@@ -97,7 +97,26 @@ There are two types of withdrawal credentials, `0x00` and `0x01`. To check which
 
 - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable.
 
-- A varying time of "validator sweep" that can take up to 5 days (at the time of writing with ~560,000 validators on the mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` and balance above 32 ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
+- A varying time of "validator sweep" that can take up to *n* days, with *n* listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32 ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
+
+<div align="center">
+
+| Number of eligible validators | Ideal scenario *n* | Practical scenario *n* |
+|:-----------------------------:|:------------------:|:----------------------:|
+| 300000  | 2.60 | 2.63 |
+| 400000  | 3.47 | 3.51 |
+| 500000  | 4.34 | 4.38 |
+| 600000  | 5.21 | 5.26 |
+| 700000  | 6.08 | 6.14 |
+| 800000  | 6.94 | 7.01 |
+| 900000  | 7.81 | 7.89 |
+| 1000000 | 8.68 | 8.77 |
+
+</div>
+
+> Note: The ideal scenario assumes no block proposals are missed, i.e., a total of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. The practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days.
+
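The table values follow directly from the arithmetic in the note. A small sketch reproducing them, assuming 7200 blocks/day, 16 withdrawals per block, and a 1% missed-block rate for the practical scenario:

```rust
fn main() {
    let withdrawals_per_day = 7200.0 * 16.0; // = 115200 withdrawals/day
    for validators in [300_000u32, 700_000, 1_000_000] {
        let ideal = validators as f64 / withdrawals_per_day;
        let practical = ideal / 0.99; // 1% of blocks missed
        println!("{validators}: ideal {ideal:.2} days, practical {practical:.2} days");
    }
    // The fixed waiting period: 256 epochs * 32 slots * 12 s = 98304 s, about 27.3 h.
    println!("fixed wait: {:.1} h", 256.0 * 32.0 * 12.0 / 3600.0);
}
```

For example, 700000 / 115200 gives 6.08 days ideal and 6.08 / 0.99 gives 6.14 days practical, matching the table rows.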
 The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address.
 
@@ -13,13 +13,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .settings(&[clap::AppSettings::ColoredHelp])
         .arg(
             Arg::with_name("enr-address")
-                .value_name("IP-ADDRESS")
-                .help("The external IP address/ DNS address to broadcast to other peers on how to reach this node. \
-                    If a DNS address is provided, the enr-address is set to the IP address it resolves to and \
-                    does not auto-update based on PONG responses in discovery.")
+                .long("enr-address")
+                .value_name("ADDRESS")
+                .help("The IP address/ DNS address to broadcast to other peers on how to reach \
+                    this node. If a DNS address is provided, the enr-address is set to the IP \
+                    address it resolves to and does not auto-update based on PONG responses in \
+                    discovery. Set this only if you are sure other nodes can connect to your \
+                    local node on this address. This will update the `ip4` or `ip6` ENR fields \
+                    accordingly. To update both, set this flag twice with the different values.")
+                .multiple(true)
+                .max_values(2)
                 .required(true)
-                .takes_value(true)
                 .conflicts_with("network-dir")
+                .takes_value(true),
         )
         .arg(
             Arg::with_name("port")
@@ -29,11 +35,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("9000")
                 .takes_value(true)
         )
+        .arg(
+            Arg::with_name("port6")
+                .long("port6")
+                .value_name("PORT")
+                .help("The UDP port to listen on over IPv6 when listening over both IPv4 and \
+                    IPv6. Defaults to 9090 when required.")
+                .default_value("9090")
+                .takes_value(true),
+        )
         .arg(
             Arg::with_name("listen-address")
                 .long("listen-address")
                 .value_name("ADDRESS")
-                .help("The address the bootnode will listen for UDP connections.")
+                .help("The address the bootnode will listen for UDP communications. To listen \
+                    over IPv4 and IPv6 set this flag twice with the different values.\n\
+                    Examples:\n\
+                    - --listen-address '0.0.0.0' will listen over IPv4.\n\
+                    - --listen-address '::' will listen over IPv6.\n\
+                    - --listen-address '0.0.0.0' --listen-address '::' will listen over both \
+                    IPv4 and IPv6. The order of the given addresses is not relevant. However, \
+                    multiple IPv4, or multiple IPv6 addresses will not be accepted.")
+                .multiple(true)
+                .max_values(2)
                 .default_value("0.0.0.0")
                 .takes_value(true)
         )
@@ -59,6 +83,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .value_name("PORT")
                 .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \
                     can connect to your local node on this port over IPv6.")
+                .conflicts_with("network-dir")
                 .takes_value(true),
         )
         .arg(
@@ -77,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             Arg::with_name("network-dir")
                 .value_name("NETWORK_DIR")
                 .long("network-dir")
-                .help("The directory which contains the enr and it's assoicated private key")
+                .help("The directory which contains the enr and its associated private key")
                 .takes_value(true)
         )
 }
@@ -2,7 +2,6 @@ use beacon_node::{get_data_dir, set_network_config};
 use clap::ArgMatches;
 use eth2_network_config::Eth2NetworkConfig;
 use lighthouse_network::discovery::create_enr_builder_from_config;
-use lighthouse_network::discv5::IpMode;
 use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr};
 use lighthouse_network::{
     discovery::{load_enr_from_disk, use_or_load_enr},
@@ -10,13 +9,12 @@ use lighthouse_network::{
 };
 use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
-use std::net::SocketAddr;
+use std::net::{SocketAddrV4, SocketAddrV6};
 use std::{marker::PhantomData, path::PathBuf};
 use types::EthSpec;
 
 /// A set of configuration parameters for the bootnode, established from CLI arguments.
 pub struct BootNodeConfig<T: EthSpec> {
-    pub listen_socket: SocketAddr,
     // TODO: Generalise to multiaddr
     pub boot_nodes: Vec<Enr>,
     pub local_enr: Enr,
@@ -81,31 +79,6 @@ impl<T: EthSpec> BootNodeConfig<T> {
             network_config.discv5_config.enr_update = false;
         }
 
-        // the address to listen on
-        let listen_socket = match network_config.listen_addrs().clone() {
-            lighthouse_network::ListenAddress::V4(v4_addr) => {
-                // Set explicitly as ipv4 otherwise
-                network_config.discv5_config.ip_mode = IpMode::Ip4;
-                v4_addr.udp_socket_addr()
-            }
-            lighthouse_network::ListenAddress::V6(v6_addr) => {
-                // create ipv6 sockets and enable ipv4 mapped addresses.
-                network_config.discv5_config.ip_mode = IpMode::Ip6 {
-                    enable_mapped_addresses: false,
-                };
-
-                v6_addr.udp_socket_addr()
-            }
-            lighthouse_network::ListenAddress::DualStack(_v4_addr, v6_addr) => {
-                // create ipv6 sockets and enable ipv4 mapped addresses.
-                network_config.discv5_config.ip_mode = IpMode::Ip6 {
-                    enable_mapped_addresses: true,
-                };
-
-                v6_addr.udp_socket_addr()
-            }
-        };
-
         let private_key = load_private_key(&network_config, &logger);
         let local_key = CombinedKey::from_libp2p(&private_key)?;
 
@@ -143,7 +116,7 @@ impl<T: EthSpec> BootNodeConfig<T> {
                 let mut builder = create_enr_builder_from_config(&network_config, enable_tcp);
                 // If we know of the ENR field, add it to the initial construction
                 if let Some(enr_fork_bytes) = enr_fork {
-                    builder.add_value("eth2", enr_fork_bytes.as_slice());
+                    builder.add_value("eth2", &enr_fork_bytes);
                 }
                 builder
                     .build(&local_key)
@@ -155,7 +128,6 @@ impl<T: EthSpec> BootNodeConfig<T> {
         };
 
         Ok(BootNodeConfig {
-            listen_socket,
             boot_nodes,
             local_enr,
             local_key,
@@ -170,7 +142,8 @@ impl<T: EthSpec> BootNodeConfig<T> {
 /// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`.
 #[derive(Serialize, Deserialize)]
 pub struct BootNodeConfigSerialization {
-    pub listen_socket: SocketAddr,
+    pub ipv4_listen_socket: Option<SocketAddrV4>,
+    pub ipv6_listen_socket: Option<SocketAddrV6>,
     // TODO: Generalise to multiaddr
     pub boot_nodes: Vec<Enr>,
     pub local_enr: Enr,
@@ -183,7 +156,6 @@ impl BootNodeConfigSerialization {
     /// relevant fields of `config`
     pub fn from_config_ref<T: EthSpec>(config: &BootNodeConfig<T>) -> Self {
         let BootNodeConfig {
-            listen_socket,
            boot_nodes,
            local_enr,
            local_key: _,
@@ -191,8 +163,27 @@ impl BootNodeConfigSerialization {
             phantom: _,
         } = config;
 
+        let (ipv4_listen_socket, ipv6_listen_socket) = match discv5_config.listen_config {
+            lighthouse_network::discv5::ListenConfig::Ipv4 { ip, port } => {
+                (Some(SocketAddrV4::new(ip, port)), None)
+            }
+            lighthouse_network::discv5::ListenConfig::Ipv6 { ip, port } => {
+                (None, Some(SocketAddrV6::new(ip, port, 0, 0)))
+            }
+            lighthouse_network::discv5::ListenConfig::DualStack {
+                ipv4,
+                ipv4_port,
+                ipv6,
+                ipv6_port,
+            } => (
+                Some(SocketAddrV4::new(ipv4, ipv4_port)),
+                Some(SocketAddrV6::new(ipv6, ipv6_port, 0, 0)),
+            ),
+        };
+
         BootNodeConfigSerialization {
-            listen_socket: *listen_socket,
+            ipv4_listen_socket,
+            ipv6_listen_socket,
             boot_nodes: boot_nodes.clone(),
             local_enr: local_enr.clone(),
             disable_packet_filter: !discv5_config.enable_packet_filter,
@@ -10,7 +10,6 @@ use types::EthSpec;
 
 pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
     let BootNodeConfig {
-        listen_socket,
         boot_nodes,
         local_enr,
         local_key,
@@ -31,7 +30,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
     let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string());
     info!(
         log, "Configuration parameters";
-        "listening_address" => %listen_socket,
+        "listening_address" => ?discv5_config.listen_config,
         "advertised_v4_address" => ?pretty_v4_socket,
         "advertised_v6_address" => ?pretty_v6_socket,
         "eth2" => eth2_field
@@ -41,6 +40,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
 
     // build the contactable multiaddr list, adding the p2p protocol
     info!(log, "Contact information"; "enr" => local_enr.to_base64());
+    info!(log, "Enr details"; "enr" => ?local_enr);
     info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p());
 
     // construct the discv5 server
@@ -64,7 +64,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
     }
 
     // start the server
-    if let Err(e) = discv5.start(listen_socket).await {
+    if let Err(e) = discv5.start().await {
         slog::crit!(log, "Could not start discv5 server"; "error" => %e);
         return;
     }
@@ -10,7 +10,7 @@ edition = "2021"
 serde = { version = "1.0.116", features = ["derive"] }
 serde_json = "1.0.58"
 types = { path = "../../consensus/types" }
-reqwest = { version = "0.11.0", features = ["json","stream"] }
+reqwest = { version = "0.11.0", features = ["json", "stream"] }
 lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
 proto_array = { path = "../../consensus/proto_array", optional = true }
 ethereum_serde_utils = "0.5.0"
@@ -26,7 +26,12 @@ futures-util = "0.3.8"
 futures = "0.3.8"
 store = { path = "../../beacon_node/store", optional = true }
 slashing_protection = { path = "../../validator_client/slashing_protection", optional = true }
+mediatype = "0.19.13"
 mime = "0.3.16"
+pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" }
+
+[dev-dependencies]
+tokio = { version = "1.14.0", features = ["full"] }
 
 [target.'cfg(target_os = "linux")'.dependencies]
 psutil = { version = "3.2.2", optional = true }
@@ -34,4 +39,10 @@ procinfo = { version = "0.4.2", optional = true }
 
 [features]
 default = ["lighthouse"]
-lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
+lighthouse = [
+    "proto_array",
+    "psutil",
+    "procinfo",
+    "store",
+    "slashing_protection",
+]
@@ -19,6 +19,7 @@ use self::types::{Error as ResponseError, *};
 use futures::Stream;
 use futures_util::StreamExt;
 use lighthouse_network::PeerId;
+use pretty_reqwest_error::PrettyReqwestError;
 pub use reqwest;
 use reqwest::{IntoUrl, RequestBuilder, Response};
 pub use reqwest::{StatusCode, Url};
@@ -39,7 +40,7 @@ pub const CONSENSUS_VERSION_HEADER: &str = "Eth-Consensus-Version";
 #[derive(Debug)]
 pub enum Error {
     /// The `reqwest` client raised an error.
-    Reqwest(reqwest::Error),
+    HttpClient(PrettyReqwestError),
     /// The server returned an error message where the body was able to be parsed.
     ServerMessage(ErrorMessage),
     /// The server returned an error message with an array of errors.
@@ -70,7 +71,7 @@ pub enum Error {
 
 impl From<reqwest::Error> for Error {
     fn from(error: reqwest::Error) -> Self {
-        Error::Reqwest(error)
+        Error::HttpClient(error.into())
     }
 }
 
@@ -78,7 +79,7 @@ impl Error {
     /// If the error has a HTTP status code, return it.
     pub fn status(&self) -> Option<StatusCode> {
         match self {
-            Error::Reqwest(error) => error.status(),
+            Error::HttpClient(error) => error.inner().status(),
             Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(),
             Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(),
             Error::StatusCode(status) => Some(*status),
@@ -218,7 +219,11 @@ impl BeaconNodeHttpClient {
 
     /// Perform a HTTP GET request, returning `None` on a 404 error.
     async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> {
-        match self.get_response(url, |b| b).await.optional()? {
+        match self
+            .get_response(url, |b| b.accept(Accept::Json))
+            .await
+            .optional()?
+        {
             Some(response) => Ok(Some(response.json().await?)),
             None => Ok(None),
         }
@@ -231,7 +236,7 @@ impl BeaconNodeHttpClient {
         timeout: Duration,
     ) -> Result<Option<T>, Error> {
         let opt_response = self
-            .get_response(url, |b| b.timeout(timeout))
+            .get_response(url, |b| b.timeout(timeout).accept(Accept::Json))
             .await
             .optional()?;
         match opt_response {
@@ -274,7 +279,7 @@ impl BeaconNodeHttpClient {
             .await?
             .json()
             .await
-            .map_err(Error::Reqwest)
+            .map_err(Into::into)
     }
 
     /// Perform a HTTP POST request with a custom timeout.
@@ -299,7 +304,7 @@ impl BeaconNodeHttpClient {
             .await?
             .json()
             .await
-            .map_err(Error::Reqwest)
+            .map_err(Error::from)
     }
 
     /// Generic POST function supporting arbitrary responses and timeouts.
@@ -1010,16 +1015,14 @@ impl BeaconNodeHttpClient {
 
     /// `GET beacon/deposit_snapshot`
     pub async fn get_deposit_snapshot(&self) -> Result<Option<types::DepositTreeSnapshot>, Error> {
-        use ssz::Decode;
         let mut path = self.eth_path(V1)?;
         path.path_segments_mut()
             .map_err(|()| Error::InvalidUrl(self.server.clone()))?
             .push("beacon")
             .push("deposit_snapshot");
-        self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot)
-            .await?
-            .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
-            .transpose()
+        self.get_opt_with_timeout::<GenericResponse<_>, _>(path, self.timeouts.get_deposit_snapshot)
+            .await
+            .map(|opt| opt.map(|r| r.data))
     }
 
     /// `POST beacon/rewards/sync_committee`
@@ -1671,7 +1674,7 @@ impl BeaconNodeHttpClient {
             .bytes_stream()
             .map(|next| match next {
                 Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()),
-                Err(e) => Err(Error::Reqwest(e)),
+                Err(e) => Err(Error::HttpClient(e.into())),
             }))
     }
 
@@ -364,12 +364,12 @@ pub struct DatabaseInfo {
 impl BeaconNodeHttpClient {
     /// Perform a HTTP GET request, returning `None` on a 404 error.
     async fn get_bytes_opt<U: IntoUrl>(&self, url: U) -> Result<Option<Vec<u8>>, Error> {
-        let response = self.client.get(url).send().await.map_err(Error::Reqwest)?;
+        let response = self.client.get(url).send().await.map_err(Error::from)?;
         match ok_or_error(response).await {
             Ok(resp) => Ok(Some(
                 resp.bytes()
                     .await
-                    .map_err(Error::Reqwest)?
+                    .map_err(Error::from)?
                     .into_iter()
                     .collect::<Vec<_>>(),
             )),
@@ -16,6 +16,7 @@ use std::path::Path;
 
 pub use reqwest;
 pub use reqwest::{Response, StatusCode, Url};
+use types::graffiti::GraffitiString;
 
 /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a
 /// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`).
@@ -169,7 +170,7 @@ impl ValidatorClientHttpClient {
             .map_err(|_| Error::InvalidSignatureHeader)?
             .to_string();
 
-        let body = response.bytes().await.map_err(Error::Reqwest)?;
+        let body = response.bytes().await.map_err(Error::from)?;
 
         let message =
             Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes");
@@ -221,7 +222,7 @@ impl ValidatorClientHttpClient {
             .headers(self.headers()?)
             .send()
             .await
-            .map_err(Error::Reqwest)?;
+            .map_err(Error::from)?;
         ok_or_error(response).await
     }
 
@@ -235,7 +236,7 @@ impl ValidatorClientHttpClient {
             .await?
             .json()
             .await
-            .map_err(Error::Reqwest)
+            .map_err(Error::from)
     }
 
     /// Perform a HTTP GET request, returning `None` on a 404 error.
@@ -265,7 +266,7 @@ impl ValidatorClientHttpClient {
             .json(body)
             .send()
             .await
-            .map_err(Error::Reqwest)?;
+            .map_err(Error::from)?;
         ok_or_error(response).await
     }
 
@@ -296,7 +297,7 @@ impl ValidatorClientHttpClient {
             .json(body)
             .send()
             .await
-            .map_err(Error::Reqwest)?;
+            .map_err(Error::from)?;
         let response = ok_or_error(response).await?;
         self.signed_body(response).await?;
         Ok(())
@@ -315,7 +316,7 @@ impl ValidatorClientHttpClient {
             .json(body)
             .send()
             .await
-            .map_err(Error::Reqwest)?;
+            .map_err(Error::from)?;
         ok_or_error(response).await
     }
 
@@ -467,6 +468,7 @@ impl ValidatorClientHttpClient {
         enabled: Option<bool>,
         gas_limit: Option<u64>,
         builder_proposals: Option<bool>,
+        graffiti: Option<GraffitiString>,
     ) -> Result<(), Error> {
         let mut path = self.server.full.clone();
 
@@ -482,6 +484,7 @@ impl ValidatorClientHttpClient {
                 enabled,
                 gas_limit,
                 builder_proposals,
+                graffiti,
             },
         )
         .await
@@ -83,6 +83,9 @@ pub struct ValidatorPatchRequest {
     #[serde(default)]
     #[serde(skip_serializing_if = "Option::is_none")]
     pub builder_proposals: Option<bool>,
+    #[serde(default)]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub graffiti: Option<GraffitiString>,
 }
 
 #[derive(Clone, PartialEq, Serialize, Deserialize)]
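The `#[serde(default)]`/`#[serde(skip_serializing_if = "Option::is_none")]` pair is what makes each PATCH field optional on the wire. A minimal sketch of the same pattern with a hypothetical stand-in struct (not the Lighthouse type), assuming `serde` and `serde_json`:

```rust
use serde::{Deserialize, Serialize};

// Unset fields are omitted from the JSON entirely rather than serialized
// as `null`, so the server can tell "leave unchanged" from "clear".
#[derive(Serialize, Deserialize, Debug)]
struct PatchBody {
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    enabled: Option<bool>,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    graffiti: Option<String>,
}

fn main() {
    let body = PatchBody { enabled: None, graffiti: Some("Mr F was here".into()) };
    // Prints {"graffiti":"Mr F was here"}; `enabled` is absent from the output.
    println!("{}", serde_json::to_string(&body).unwrap());
}
```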
@@ -3,10 +3,9 @@
 
 use crate::Error as ServerError;
 use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus};
-use mime::{Mime, APPLICATION, JSON, OCTET_STREAM, STAR};
+use mediatype::{names, MediaType, MediaTypeList};
 use serde::{Deserialize, Serialize};
 use ssz_derive::Encode;
-use std::cmp::Reverse;
 use std::convert::TryFrom;
 use std::fmt;
 use std::str::{from_utf8, FromStr};
@@ -1173,35 +1172,58 @@ impl FromStr for Accept {
     type Err = String;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let mut mimes = parse_accept(s)?;
+        let media_type_list = MediaTypeList::new(s);
 
         // [q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2
         // find the highest q-factor supported accept type
-        mimes.sort_by_key(|m| {
-            Reverse(m.get_param("q").map_or(1000_u16, |n| {
-                (n.as_ref().parse::<f32>().unwrap_or(0_f32) * 1000_f32) as u16
-            }))
-        });
-        mimes
-            .into_iter()
-            .find_map(|m| match (m.type_(), m.subtype()) {
-                (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz),
-                (APPLICATION, JSON) => Some(Accept::Json),
-                (STAR, STAR) => Some(Accept::Any),
-                _ => None,
-            })
-            .ok_or_else(|| "accept header is not supported".to_string())
-    }
-}
-
-fn parse_accept(accept: &str) -> Result<Vec<Mime>, String> {
-    accept
-        .split(',')
-        .map(|part| {
-            part.parse()
-                .map_err(|e| format!("error parsing Accept header: {}", e))
-        })
-        .collect()
-}
+        let mut highest_q = 0_u16;
+        let mut accept_type = None;
+
+        const APPLICATION: &str = names::APPLICATION.as_str();
+        const OCTET_STREAM: &str = names::OCTET_STREAM.as_str();
+        const JSON: &str = names::JSON.as_str();
+        const STAR: &str = names::_STAR.as_str();
+        const Q: &str = names::Q.as_str();
+
+        media_type_list.into_iter().for_each(|item| {
+            if let Ok(MediaType {
+                ty,
+                subty,
+                suffix: _,
+                params,
+            }) = item
+            {
+                let q_accept = match (ty.as_str(), subty.as_str()) {
+                    (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz),
+                    (APPLICATION, JSON) => Some(Accept::Json),
+                    (STAR, STAR) => Some(Accept::Any),
+                    _ => None,
+                }
+                .map(|item_accept_type| {
+                    let q_val = params
+                        .iter()
+                        .find_map(|(n, v)| match n.as_str() {
+                            Q => Some((v.as_str().parse::<f32>().unwrap_or(0_f32) * 1000_f32) as u16),
+                            _ => None,
+                        })
+                        .or(Some(1000_u16));
+
+                    (q_val.unwrap(), item_accept_type)
+                });
+
+                match q_accept {
+                    Some((q, accept)) if q > highest_q => {
+                        highest_q = q;
+                        accept_type = Some(accept);
+                    }
+                    _ => (),
+                }
+            }
+        });
+        accept_type.ok_or_else(|| "accept header is not supported".to_string())
+    }
+}
|
|||||||
assert_eq!(
|
assert_eq!(
|
||||||
Accept::from_str("text/plain"),
|
Accept::from_str("text/plain"),
|
||||||
Err("accept header is not supported".to_string())
|
Err("accept header is not supported".to_string())
|
||||||
)
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
Accept::from_str("application/json;message=\"Hello, world!\";q=0.3,*/*;q=0.6").unwrap(),
|
||||||
|
Accept::Any
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -20,4 +20,4 @@ types = { path = "../../consensus/types"}
 kzg = { path = "../../crypto/kzg" }
 ethereum_ssz = "0.5.0"
 eth2_config = { path = "../eth2_config"}
-discv5 = "0.2.2"
+discv5 = "0.3.0"
@@ -38,7 +38,7 @@ BELLATRIX_FORK_VERSION: 0x02000064
 BELLATRIX_FORK_EPOCH: 385536
 # Capella
 CAPELLA_FORK_VERSION: 0x03000064
-CAPELLA_FORK_EPOCH: 18446744073709551615
+CAPELLA_FORK_EPOCH: 648704
 # Deneb
 DENEB_FORK_VERSION: 0x04000064
 DENEB_FORK_EPOCH: 18446744073709551615
common/pretty_reqwest_error/Cargo.toml (new file, +10)
@@ -0,0 +1,10 @@
+[package]
+name = "pretty_reqwest_error"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+reqwest = { version = "0.11.0", features = ["json","stream"] }
+sensitive_url = { path = "../sensitive_url" }
common/pretty_reqwest_error/src/lib.rs (new file, +62)
@@ -0,0 +1,62 @@
+use sensitive_url::SensitiveUrl;
+use std::error::Error as StdError;
+use std::fmt;
+
+pub struct PrettyReqwestError(reqwest::Error);
+
+impl PrettyReqwestError {
+    pub fn inner(&self) -> &reqwest::Error {
+        &self.0
+    }
+}
+
+impl fmt::Debug for PrettyReqwestError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        if let Some(url) = self.0.url() {
+            if let Ok(url) = SensitiveUrl::new(url.clone()) {
+                write!(f, "url: {}", url)?;
+            } else {
+                write!(f, "url: unable_to_parse")?;
+            };
+        }
+
+        let kind = if self.0.is_builder() {
+            "builder"
+        } else if self.0.is_redirect() {
+            "redirect"
+        } else if self.0.is_status() {
+            "status"
+        } else if self.0.is_timeout() {
+            "timeout"
+        } else if self.0.is_request() {
+            "request"
+        } else if self.0.is_connect() {
+            "connect"
+        } else if self.0.is_body() {
+            "body"
+        } else if self.0.is_decode() {
+            "decode"
+        } else {
+            "unknown"
+        };
+        write!(f, ", kind: {}", kind)?;
+
+        if let Some(status) = self.0.status() {
+            write!(f, ", status_code: {}", status)?;
+        }
+
+        if let Some(ref source) = self.0.source() {
+            write!(f, ", detail: {}", source)?;
+        } else {
+            write!(f, ", source: unknown")?;
+        }
+
+        Ok(())
+    }
+}
+
+impl From<reqwest::Error> for PrettyReqwestError {
+    fn from(inner: reqwest::Error) -> Self {
+        Self(inner)
+    }
+}
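A quick usage sketch for the new crate, assuming `tokio` and `reqwest` are available alongside `pretty_reqwest_error`: force a connection failure and print it through the redacting `Debug` impl.

```rust
use pretty_reqwest_error::PrettyReqwestError;

#[tokio::main]
async fn main() {
    // Nothing should be listening on port 1, so this request fails to connect.
    let err = reqwest::get("http://localhost:1/unreachable")
        .await
        .expect_err("port 1 should refuse the connection");
    let pretty: PrettyReqwestError = err.into();
    // The Debug impl prints the URL (redacted via SensitiveUrl) plus an
    // error kind and detail, e.g. "url: ..., kind: request, detail: ...".
    println!("{:?}", pretty);
}
```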
@@ -75,7 +75,7 @@ impl SensitiveUrl {
         SensitiveUrl::new(surl)
     }
 
-    fn new(full: Url) -> Result<Self, SensitiveError> {
+    pub fn new(full: Url) -> Result<Self, SensitiveError> {
         let mut redacted = full.clone();
         redacted
             .path_segments_mut()
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 ethereum-types = "0.14.1"
-ssz_types = "0.5.0"
+ssz_types = "0.5.3"
 ethereum_hashing = "1.0.0-beta.2"
 ethereum_ssz_derive = "0.5.0"
 ethereum_ssz = "0.5.0"
@ -174,21 +174,6 @@ impl<T> From<proto_array::Error> for Error<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Indicates whether the unrealized justification of a block should be calculated and tracked.
|
|
||||||
/// If a block has been finalized, this can be set to false. This is useful when syncing finalized
|
|
||||||
/// portions of the chain. Otherwise this should always be set to true.
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
|
||||||
pub enum CountUnrealized {
|
|
||||||
True,
|
|
||||||
False,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CountUnrealized {
|
|
||||||
pub fn is_true(&self) -> bool {
|
|
||||||
matches!(self, CountUnrealized::True)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Indicates if a block has been verified by an execution payload.
|
/// Indicates if a block has been verified by an execution payload.
|
||||||
///
|
///
|
||||||
/// There is no variant for "invalid", since such a block should never be added to fork choice.
|
/// There is no variant for "invalid", since such a block should never be added to fork choice.
|
||||||
@@ -661,8 +646,14 @@ where
         state: &BeaconState<E>,
         payload_verification_status: PayloadVerificationStatus,
         spec: &ChainSpec,
-        count_unrealized: CountUnrealized,
     ) -> Result<(), Error<T::Error>> {
+        // If this block has already been processed we do not need to reprocess it.
+        // We check this immediately in case re-processing the block mutates some property of the
+        // global fork choice store, e.g. the justified checkpoints or the proposer boost root.
+        if self.proto_array.contains_block(&block_root) {
+            return Ok(());
+        }
+
         // Provide the slot (as per the system clock) to the `fc_store` and then return its view of
         // the current slot. The `fc_store` will ensure that the `current_slot` is never
         // decreasing, a property which we must maintain.
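The new early return makes block import idempotent: a root the proto-array already knows is skipped before any store state (justified checkpoints, proposer boost root) can be mutated a second time. A toy model of the guard, with a `HashSet` standing in for the proto-array:

```rust
use std::collections::HashSet;

// Toy fork choice: re-importing a known block root is a no-op rather than
// a second run of the import side effects.
struct ForkChoice {
    known_blocks: HashSet<u64>, // stand-in for proto_array block roots
}

impl ForkChoice {
    fn on_block(&mut self, block_root: u64) -> Result<(), String> {
        if self.known_blocks.contains(&block_root) {
            return Ok(()); // already processed; leave the store untouched
        }
        self.known_blocks.insert(block_root);
        // ...the rest of the import would run here...
        Ok(())
    }
}

fn main() {
    let mut fc = ForkChoice { known_blocks: HashSet::new() };
    assert!(fc.on_block(1).is_ok());
    assert!(fc.on_block(1).is_ok()); // second import is harmless
}
```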
@@ -728,9 +719,6 @@ where
         )?;
 
         // Update unrealized justified/finalized checkpoints.
-        let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized
-            .is_true()
-        {
         let block_epoch = block.slot().epoch(E::slots_per_epoch());
 
         // If the parent checkpoints are already at the same epoch as the block being imported,
@@ -748,8 +736,7 @@ where
             .unrealized_justified_checkpoint
             .zip(parent_block.unrealized_finalized_checkpoint)
             .filter(|(parent_justified, parent_finalized)| {
-                parent_justified.epoch == block_epoch
-                    && parent_finalized.epoch + 1 >= block_epoch
+                parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
             });
 
         let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
@@ -814,14 +801,6 @@ where
             )?;
         }
-
-            (
-                Some(unrealized_justified_checkpoint),
-                Some(unrealized_finalized_checkpoint),
-            )
-        } else {
-            (None, None)
-        };
 
         let target_slot = block
             .slot()
             .epoch(E::slots_per_epoch())
@@ -891,8 +870,8 @@ where
                 justified_checkpoint: state.current_justified_checkpoint(),
                 finalized_checkpoint: state.finalized_checkpoint(),
                 execution_status,
-                unrealized_justified_checkpoint,
-                unrealized_finalized_checkpoint,
+                unrealized_justified_checkpoint: Some(unrealized_justified_checkpoint),
+                unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint),
             },
             current_slot,
         )?;
@@ -2,9 +2,9 @@ mod fork_choice;
 mod fork_choice_store;
 
 pub use crate::fork_choice::{
-    AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView,
-    ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
-    PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses,
+    AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters,
+    InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice,
+    QueuedAttestation, ResetPayloadStatuses,
 };
 pub use fork_choice_store::ForkChoiceStore;
 pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};
@@ -12,8 +12,7 @@ use beacon_chain::{
     StateSkipConfig, WhenSlotSkipped,
 };
 use fork_choice::{
-    CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
-    QueuedAttestation,
+    ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation,
 };
 use store::MemoryStore;
 use types::{
@@ -288,7 +287,6 @@ impl ForkChoiceTest {
                 &state,
                 PayloadVerificationStatus::Verified,
                 &self.harness.chain.spec,
-                CountUnrealized::True,
             )
             .unwrap();
         self
@@ -331,7 +329,6 @@ impl ForkChoiceTest {
                 &state,
                 PayloadVerificationStatus::Verified,
                 &self.harness.chain.spec,
-                CountUnrealized::True,
             )
             .err()
             .expect("on_block did not return an error");
@@ -15,7 +15,7 @@ integer-sqrt = "0.1.5"
 itertools = "0.10.0"
 ethereum_ssz = "0.5.0"
 ethereum_ssz_derive = "0.5.0"
-ssz_types = "0.5.0"
+ssz_types = "0.5.3"
 merkle_proof = { path = "../merkle_proof" }
 safe_arith = { path = "../safe_arith" }
 tree_hash = "0.5.0"
@@ -28,7 +28,7 @@ serde_derive = "1.0.116"
 slog = "2.5.2"
 ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] }
 ethereum_ssz_derive = "0.5.0"
-ssz_types = { version = "0.5.0", features = ["arbitrary"] }
+ssz_types = { version = "0.5.3", features = ["arbitrary"] }
 swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] }
 test_random_derive = { path = "../../common/test_random_derive" }
 tree_hash = { version = "0.5.0", features = ["arbitrary"] }
@@ -856,7 +856,7 @@ impl ChainSpec {
             * Capella hard fork params
             */
            capella_fork_version: [0x03, 0x00, 0x00, 0x64],
-            capella_fork_epoch: None,
+            capella_fork_epoch: Some(Epoch::new(648704)),
            max_validators_per_withdrawals_sweep: 8192,
 
            /*
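This hunk is in the Gnosis chain spec (Capella fork version `0x03000064`), which schedules Capella at epoch 648704. Assuming the Gnosis presets of 16 slots per epoch and 5-second slots (an assumption; check the chain config), the epoch converts to a slot and a genesis offset like so:

```rust
// Back-of-the-envelope conversion for the scheduled fork epoch, under the
// assumed Gnosis presets SLOTS_PER_EPOCH = 16 and SECONDS_PER_SLOT = 5.
fn main() {
    let capella_fork_epoch: u64 = 648_704;
    let slots_per_epoch: u64 = 16;
    let seconds_per_slot: u64 = 5;

    let first_slot = capella_fork_epoch * slots_per_epoch;
    let seconds_after_genesis = first_slot * seconds_per_slot;

    println!("first Capella slot: {first_slot}"); // 10379264
    println!("~{} days after genesis", seconds_after_genesis / 86_400); // ~600
}
```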
@@ -30,8 +30,10 @@ impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
 pub struct DepositTreeSnapshot {
     pub finalized: Vec<Hash256>,
     pub deposit_root: Hash256,
+    #[serde(with = "serde_utils::quoted_u64")]
     pub deposit_count: u64,
     pub execution_block_hash: Hash256,
+    #[serde(with = "serde_utils::quoted_u64")]
     pub execution_block_height: u64,
 }
 
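The `#[serde(with = "serde_utils::quoted_u64")]` attribute makes these `u64` fields round-trip as quoted decimal strings, since plain JSON numbers lose precision above 2^53 in many consumers. A self-contained sketch of how such a helper works (the real helper lives in the eth2 serde utilities; this inline module is illustrative only):

```rust
use serde::{Deserialize, Serialize};

// Illustrative reimplementation of a `quoted_u64` serde helper.
mod quoted_u64 {
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(v: &u64, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(&v.to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<u64, D::Error> {
        let s = String::deserialize(d)?;
        s.parse().map_err(serde::de::Error::custom)
    }
}

#[derive(Serialize, Deserialize)]
struct Snapshot {
    #[serde(with = "quoted_u64")]
    deposit_count: u64,
}

fn main() {
    let json = serde_json::to_string(&Snapshot { deposit_count: 123 }).unwrap();
    assert_eq!(json, r#"{"deposit_count":"123"}"#); // quoted, not bare 123
}
```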
@@ -39,7 +39,7 @@ impl CommandLineTest {
     }
 
     fn run_with_ip(&mut self) -> CompletedTest<BootNodeConfigSerialization> {
-        self.cmd.arg(IP_ADDRESS);
+        self.cmd.arg("--enr-address").arg(IP_ADDRESS);
         self.run()
     }
 }
@@ -67,7 +67,13 @@ fn port_flag() {
         .flag("port", Some(port.to_string().as_str()))
         .run_with_ip()
         .with_config(|config| {
-            assert_eq!(config.listen_socket.port(), port);
+            assert_eq!(
+                config
+                    .ipv4_listen_socket
+                    .expect("Bootnode should be listening on IPv4")
+                    .port(),
+                port
+            );
         })
 }
 
@@ -78,7 +84,13 @@ fn listen_address_flag() {
         .flag("listen-address", Some("127.0.0.2"))
         .run_with_ip()
         .with_config(|config| {
-            assert_eq!(config.listen_socket.ip(), addr);
+            assert_eq!(
+                config
+                    .ipv4_listen_socket
+                    .expect("Bootnode should be listening on IPv4")
+                    .ip(),
+                &addr
+            );
         });
 }
 
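Both tests change shape because the boot node config now exposes per-protocol listening sockets, with `ipv4_listen_socket` as an `Option`, instead of a single mandatory `listen_socket`. A toy mirror of what the assertions exercise; note that `SocketAddrV4::ip()` returns `&Ipv4Addr`, which is why the test compares against `&addr`:

```rust
use std::net::{Ipv4Addr, SocketAddrV4};

// Toy config with the same shape the tests now assert against.
struct Config {
    ipv4_listen_socket: Option<SocketAddrV4>,
}

fn main() {
    let addr = Ipv4Addr::new(127, 0, 0, 2);
    let config = Config {
        ipv4_listen_socket: Some(SocketAddrV4::new(addr, 9000)),
    };

    let socket = config
        .ipv4_listen_socket
        .expect("Bootnode should be listening on IPv4");
    assert_eq!(socket.port(), 9000);
    assert_eq!(socket.ip(), &addr); // `ip()` yields a reference
}
```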
@@ -499,3 +499,24 @@ fn latency_measurement_service() {
         assert!(!config.enable_latency_measurement_service);
     });
 }
+
+#[test]
+fn validator_registration_batch_size() {
+    CommandLineTest::new().run().with_config(|config| {
+        assert_eq!(config.validator_registration_batch_size, 500);
+    });
+    CommandLineTest::new()
+        .flag("validator-registration-batch-size", Some("100"))
+        .run()
+        .with_config(|config| {
+            assert_eq!(config.validator_registration_batch_size, 100);
+        });
+}
+
+#[test]
+#[should_panic]
+fn validator_registration_batch_size_zero_value() {
+    CommandLineTest::new()
+        .flag("validator-registration-batch-size", Some("0"))
+        .run();
+}
@@ -7,7 +7,7 @@ use beacon_chain::{
         obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
     },
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainTypes, CachedHead, CountUnrealized, NotifyExecutionLayer,
+    BeaconChainTypes, CachedHead, NotifyExecutionLayer,
 };
 use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1};
 use serde::Deserialize;
@@ -381,7 +381,6 @@ impl<E: EthSpec> Tester<E> {
         let result = self.block_on_dangerous(self.harness.chain.process_block(
             block_root,
             block.clone(),
-            CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         ))?;
         if result.is_ok() != valid {
@@ -441,7 +440,6 @@ impl<E: EthSpec> Tester<E> {
             &state,
             PayloadVerificationStatus::Irrelevant,
             &self.harness.chain.spec,
-            CountUnrealized::True,
         );
 
         if result.is_ok() {
@@ -333,6 +333,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("true")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("validator-registration-batch-size")
+                .long("validator-registration-batch-size")
+                .value_name("INTEGER")
+                .help("Defines the number of validators per \
+                    validator/register_validator request sent to the BN. This value \
+                    can be reduced to avoid timeouts from builders.")
+                .default_value("500")
+                .takes_value(true),
+        )
         /*
          * Experimental/development options.
          */
@@ -77,6 +77,8 @@ pub struct Config {
     pub disable_run_on_all: bool,
     /// Enables a service which attempts to measure latency between the VC and BNs.
     pub enable_latency_measurement_service: bool,
+    /// Defines the number of validators per `validator/register_validator` request sent to the BN.
+    pub validator_registration_batch_size: usize,
 }
 
 impl Default for Config {
@@ -117,6 +119,7 @@ impl Default for Config {
             gas_limit: None,
             disable_run_on_all: false,
             enable_latency_measurement_service: true,
+            validator_registration_batch_size: 500,
         }
     }
 }
@@ -380,6 +383,12 @@ impl Config {
         config.enable_latency_measurement_service =
             parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true);
 
+        config.validator_registration_batch_size =
+            parse_required(cli_args, "validator-registration-batch-size")?;
+        if config.validator_registration_batch_size == 0 {
+            return Err("validator-registration-batch-size cannot be 0".to_string());
+        }
+
         /*
          * Experimental
          */
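The zero check matters because the batch size presumably feeds a chunking step: per the flag's help text, registrations are split into groups of this size before each `validator/register_validator` request, and Rust's `slice::chunks` panics on a chunk size of 0, so rejecting it at parse time fails fast with a readable error (which the `#[should_panic]` CLI test above exercises). A sketch of the batching behaviour the setting controls, with hypothetical integer stand-ins for the signed registrations:

```rust
fn main() {
    // Stand-ins for signed validator registration messages.
    let registrations: Vec<u32> = (0..1_200).collect();
    let batch_size = 500; // the flag's default

    // One HTTP request per chunk: 500 + 500 + 200.
    for (i, batch) in registrations.chunks(batch_size).enumerate() {
        println!("request {}: {} registrations", i + 1, batch.len());
    }

    // `registrations.chunks(0)` would panic at runtime, which is why the
    // config rejects a batch size of zero up front.
}
```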
@@ -357,7 +357,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         .and(warp::path("graffiti"))
         .and(warp::path::end())
         .and(validator_store_filter.clone())
-        .and(graffiti_file_filter)
+        .and(graffiti_file_filter.clone())
         .and(graffiti_flag_filter)
         .and(signer.clone())
         .and(log_filter.clone())
@@ -617,18 +617,27 @@
         .and(warp::path::end())
         .and(warp::body::json())
         .and(validator_store_filter.clone())
+        .and(graffiti_file_filter)
         .and(signer.clone())
         .and(task_executor_filter.clone())
         .and_then(
             |validator_pubkey: PublicKey,
              body: api_types::ValidatorPatchRequest,
              validator_store: Arc<ValidatorStore<T, E>>,
+             graffiti_file: Option<GraffitiFile>,
              signer,
              task_executor: TaskExecutor| {
                 blocking_signed_json_task(signer, move || {
+                    if body.graffiti.is_some() && graffiti_file.is_some() {
+                        return Err(warp_utils::reject::custom_bad_request(
+                            "Unable to update graffiti as the \"--graffiti-file\" flag is set"
+                                .to_string(),
+                        ));
+                    }
+
+                    let maybe_graffiti = body.graffiti.clone().map(Into::into);
                     let initialized_validators_rw_lock = validator_store.initialized_validators();
                     let mut initialized_validators = initialized_validators_rw_lock.write();
 
                     match (
                         initialized_validators.is_enabled(&validator_pubkey),
                         initialized_validators.validator(&validator_pubkey.compress()),
@@ -641,7 +650,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
                         if Some(is_enabled) == body.enabled
                             && initialized_validator.get_gas_limit() == body.gas_limit
                             && initialized_validator.get_builder_proposals()
-                                == body.builder_proposals =>
+                                == body.builder_proposals
+                            && initialized_validator.get_graffiti() == maybe_graffiti =>
                         {
                             Ok(())
                         }
@@ -654,6 +664,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
                                     body.enabled,
                                     body.gas_limit,
                                     body.builder_proposals,
+                                    body.graffiti,
                                 ),
                             )
                             .map_err(|e| {
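Taken together, these three hunks let the PATCH endpoint update per-validator graffiti, but only when graffiti is not already owned by a `--graffiti-file`; the file wins, and an API update attempt is rejected outright. The precedence rule, reduced to a sketch with plain types standing in for the warp handler:

```rust
// Minimal model of the guard added to the PATCH handler: a configured
// graffiti file owns graffiti, so API updates touching it are refused.
fn validate_update(
    requested_graffiti: Option<&str>,
    graffiti_file_configured: bool,
) -> Result<(), String> {
    if requested_graffiti.is_some() && graffiti_file_configured {
        return Err(
            "Unable to update graffiti as the \"--graffiti-file\" flag is set".to_string(),
        );
    }
    Ok(())
}

fn main() {
    assert!(validate_update(Some("lighthouse rocks"), true).is_err());
    assert!(validate_update(Some("lighthouse rocks"), false).is_ok());
    assert!(validate_update(None, true).is_ok()); // not touching graffiti is fine
}
```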
Some files were not shown because too many files have changed in this diff.