Merge branch 'unstable' into off-4844

.github/workflows/docker.yml
@@ -65,7 +65,7 @@ jobs:
           x86_64-portable]
         features: [
           {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"},
-          {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"}
+          {version_suffix: "-dev", env: "jemalloc,spec-minimal"}
         ]
         include:
           - profile: maxperf
@@ -77,8 +77,6 @@ jobs:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
       FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
-      FEATURES: ${{ matrix.features.env }}
-      CROSS_FEATURES: ${{ matrix.features.env }}
     steps:
       - uses: actions/checkout@v3
       - name: Update Rust
@@ -118,7 +116,6 @@ jobs:
             --platform=linux/${SHORT_ARCH} \
             --file ./Dockerfile.cross . \
             --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \
-            --build-arg FEATURES=${FEATURES} \
             --provenance=false \
             --push
   build-docker-multiarch:
.github/workflows/test-suite.yml
@@ -10,7 +10,8 @@ on:
   pull_request:
 env:
   # Deny warnings in CI
-  RUSTFLAGS: "-D warnings"
+  # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
+  RUSTFLAGS: "-D warnings -C debuginfo=0"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
   PINNED_NIGHTLY: nightly-2022-12-15
   # Prevent Github API rate limiting.
@@ -280,7 +281,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.20'
       - uses: actions/setup-dotnet@v3
         with:
           dotnet-version: '6.0.201'
.gitignore
@@ -13,6 +13,8 @@ genesis.ssz
 
 # IntelliJ
 /*.iml
+<<<<<<< HEAD
 
 # VSCode
 /.vscode
+.idea
Cargo.lock
@@ -463,7 +463,7 @@ dependencies = [
  "http",
  "http-body",
  "hyper",
- "itoa 1.0.5",
+ "itoa",
  "matchit",
  "memchr",
  "mime",
@@ -621,7 +621,7 @@ dependencies = [
 
 [[package]]
 name = "beacon_node"
-version = "3.4.0"
+version = "3.5.0"
 dependencies = [
  "beacon_chain",
  "clap",
@@ -790,7 +790,7 @@ dependencies = [
 
 [[package]]
 name = "boot_node"
-version = "3.4.0"
+version = "3.5.0"
 dependencies = [
  "beacon_node",
  "clap",
@@ -820,18 +820,6 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
 
-[[package]]
-name = "bstr"
-version = "0.2.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
-dependencies = [
- "lazy_static",
- "memchr",
- "regex-automata",
- "serde",
-]
-
 [[package]]
 name = "buf_redux"
 version = "0.8.4"
@@ -1372,13 +1360,12 @@ dependencies = [
 
 [[package]]
 name = "csv"
-version = "1.1.6"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359"
 dependencies = [
- "bstr",
  "csv-core",
- "itoa 0.4.8",
+ "itoa",
  "ryu",
  "serde",
 ]
@@ -1449,9 +1436,9 @@ dependencies = [
 
 [[package]]
 name = "cxx"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9"
+checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8"
 dependencies = [
  "cc",
  "cxxbridge-flags",
@@ -1461,9 +1448,9 @@ dependencies = [
 
 [[package]]
 name = "cxx-build"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d"
+checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38"
 dependencies = [
  "cc",
  "codespan-reporting",
@@ -1476,15 +1463,15 @@ dependencies = [
 
 [[package]]
 name = "cxxbridge-flags"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a"
+checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03"
 
 [[package]]
 name = "cxxbridge-macro"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2"
+checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2496,6 +2483,7 @@ dependencies = [
  "fork_choice",
  "futures",
  "hex",
+ "logging",
  "reqwest",
  "sensitive_url",
  "serde_json",
@@ -2579,9 +2567,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
 
 [[package]]
 name = "fastrand"
-version = "1.8.0"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
+checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
 dependencies = [
  "instant",
 ]
@@ -3003,7 +2991,7 @@ dependencies = [
  "indexmap",
  "slab",
  "tokio",
- "tokio-util 0.7.4",
+ "tokio-util 0.7.7",
  "tracing",
 ]
 
@@ -3212,7 +3200,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
 dependencies = [
  "bytes",
  "fnv",
- "itoa 1.0.5",
+ "itoa",
 ]
 
 [[package]]
@@ -3242,6 +3230,7 @@ dependencies = [
  "environment",
  "eth1",
  "eth2",
+ "eth2_serde_utils",
  "eth2_ssz",
  "execution_layer",
  "futures",
@@ -3331,7 +3320,7 @@ dependencies = [
  "http-body",
  "httparse",
  "httpdate",
- "itoa 1.0.5",
+ "itoa",
  "pin-project-lite 0.2.9",
  "socket2",
  "tokio",
@@ -3495,7 +3484,7 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f"
 dependencies = [
- "parity-scale-codec 3.3.0",
+ "parity-scale-codec 3.4.0",
 ]
 
 [[package]]
@@ -3622,12 +3611,6 @@ dependencies = [
  "either",
 ]
 
-[[package]]
-name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
 [[package]]
 name = "itoa"
 version = "1.0.5"
@@ -3769,7 +3752,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "lcli"
-version = "3.4.0"
+version = "3.5.0"
 dependencies = [
  "account_utils",
  "beacon_chain",
@@ -4266,7 +4249,7 @@ dependencies = [
  "thiserror",
  "tinytemplate",
  "tokio",
- "tokio-util 0.7.4",
+ "tokio-util 0.7.7",
  "webrtc",
 ]
 
@@ -4375,7 +4358,7 @@ dependencies = [
 
 [[package]]
 name = "lighthouse"
-version = "3.4.0"
+version = "3.5.0"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -4442,6 +4425,7 @@ dependencies = [
  "lighthouse_metrics",
  "lighthouse_version",
  "lru 0.7.8",
+ "lru_cache",
  "parking_lot 0.12.1",
  "prometheus-client",
  "quickcheck",
@@ -4784,14 +4768,14 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.5"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
+checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
 dependencies = [
  "libc",
  "log",
  "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.42.0",
+ "windows-sys 0.45.0",
 ]
 
 [[package]]
@@ -5277,9 +5261,9 @@ dependencies = [
 
 [[package]]
 name = "once_cell"
-version = "1.17.0"
+version = "1.17.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
 
 [[package]]
 name = "oneshot_broadcast"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "parity-scale-codec"
|
name = "parity-scale-codec"
|
||||||
version = "3.3.0"
|
version = "3.4.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed"
|
checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arrayvec",
|
"arrayvec",
|
||||||
"bitvec 1.0.1",
|
"bitvec 1.0.1",
|
||||||
@ -5900,7 +5884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c"
|
checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"dtoa",
|
"dtoa",
|
||||||
"itoa 1.0.5",
|
"itoa",
|
||||||
"parking_lot 0.12.1",
|
"parking_lot 0.12.1",
|
||||||
"prometheus-client-derive-text-encode",
|
"prometheus-client-derive-text-encode",
|
||||||
]
|
]
|
||||||
@ -6337,7 +6321,7 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
"tokio-native-tls",
|
"tokio-native-tls",
|
||||||
"tokio-rustls 0.23.4",
|
"tokio-rustls 0.23.4",
|
||||||
"tokio-util 0.7.4",
|
"tokio-util 0.7.7",
|
||||||
"tower-service",
|
"tower-service",
|
||||||
"url",
|
"url",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
@ -6622,7 +6606,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"derive_more",
|
"derive_more",
|
||||||
"parity-scale-codec 3.3.0",
|
"parity-scale-codec 3.4.0",
|
||||||
"scale-info-derive",
|
"scale-info-derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -6868,11 +6852,11 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_json"
|
name = "serde_json"
|
||||||
version = "1.0.92"
|
version = "1.0.93"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
|
checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"itoa 1.0.5",
|
"itoa",
|
||||||
"ryu",
|
"ryu",
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
@ -6895,7 +6879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
|
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"form_urlencoded",
|
"form_urlencoded",
|
||||||
"itoa 1.0.5",
|
"itoa",
|
||||||
"ryu",
|
"ryu",
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
@ -7032,9 +7016,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "signal-hook-registry"
|
name = "signal-hook-registry"
|
||||||
version = "1.4.0"
|
version = "1.4.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
|
checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
]
|
]
|
||||||
@@ -7733,10 +7717,11 @@ dependencies = [
 
 [[package]]
 name = "thread_local"
-version = "1.1.4"
+version = "1.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
+checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
 dependencies = [
+ "cfg-if",
  "once_cell",
 ]
 
@@ -7766,7 +7751,7 @@ version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
 dependencies = [
- "itoa 1.0.5",
+ "itoa",
  "libc",
  "num_threads",
  "serde",
@@ -7935,7 +7920,7 @@ dependencies = [
  "futures-core",
  "pin-project-lite 0.2.9",
  "tokio",
- "tokio-util 0.7.4",
+ "tokio-util 0.7.7",
 ]
 
 [[package]]
@@ -7985,9 +7970,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-util"
-version = "0.7.4"
+version = "0.7.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
+checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2"
 dependencies = [
  "bytes",
  "futures-core",
@@ -9033,9 +9018,9 @@ dependencies = [
 
 [[package]]
 name = "webrtc-ice"
-version = "0.9.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7"
+checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80"
 dependencies = [
  "arc-swap",
  "async-trait",
Makefile
@@ -38,15 +38,24 @@ PROFILE ?= release
 # they run for different forks.
 FORKS=phase0 altair merge capella eip4844
 
+# Extra flags for Cargo
+CARGO_INSTALL_EXTRA_FLAGS?=
+
 # Builds the Lighthouse binary in release (optimized).
 #
 # Binaries will most likely be found in `./target/release`
 install:
-	cargo install --path lighthouse --force --locked --features "$(FEATURES)" --profile "$(PROFILE)"
+	cargo install --path lighthouse --force --locked \
+		--features "$(FEATURES)" \
+		--profile "$(PROFILE)" \
+		$(CARGO_INSTALL_EXTRA_FLAGS)
 
 # Builds the lcli binary in release (optimized).
 install-lcli:
-	cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)"
+	cargo install --path lcli --force --locked \
+		--features "$(FEATURES)" \
+		--profile "$(PROFILE)" \
+		$(CARGO_INSTALL_EXTRA_FLAGS)
 
 # The following commands use `cross` to build a cross-compile.
 #
@@ -124,7 +133,7 @@ run-ef-tests:
 test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS))
 
 test-beacon-chain-%:
-	env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain
+	env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain
 
 # Run the tests in the `operation_pool` crate for all known forks.
 test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS))
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "3.4.0"
+version = "3.5.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"
 
@@ -38,7 +38,7 @@ clap_utils = { path = "../common/clap_utils" }
 hyper = "0.14.4"
 lighthouse_version = { path = "../common/lighthouse_version" }
 hex = "0.4.2"
-slasher = { path = "../slasher" }
+slasher = { path = "../slasher", default-features = false }
 monitoring_api = { path = "../common/monitoring_api" }
 sensitive_url = { path = "../common/sensitive_url" }
 http_api = { path = "http_api" }
@@ -57,7 +57,7 @@ fork_choice = { path = "../../consensus/fork_choice" }
 task_executor = { path = "../../common/task_executor" }
 derivative = "2.1.1"
 itertools = "0.10.0"
-slasher = { path = "../../slasher" }
+slasher = { path = "../../slasher", default-features = false }
 eth2 = { path = "../../common/eth2" }
 strum = { version = "0.24.0", features = ["derive"] }
 logging = { path = "../../common/logging" }
@@ -21,8 +21,6 @@ pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300;
 pub enum CapellaReadiness {
     /// The execution engine is capella-enabled (as far as we can tell)
     Ready,
-    /// The EL can be reached and has the correct configuration, however it's not yet synced.
-    NotSynced,
     /// We are connected to an execution engine which doesn't support the V2 engine api methods
     V2MethodsNotSupported { error: String },
     /// The transition configuration with the EL failed, there might be a problem with
@@ -44,11 +42,6 @@ impl fmt::Display for CapellaReadiness {
                 execution endpoint: {}",
                 error
             ),
-            CapellaReadiness::NotSynced => write!(
-                f,
-                "The execution endpoint is connected and configured, \
-                however it is not yet synced"
-            ),
             CapellaReadiness::NoExecutionEndpoint => write!(
                 f,
                 "The --execution-endpoint flag is not specified, this is a \
@@ -56,8 +49,7 @@ impl fmt::Display for CapellaReadiness {
             ),
             CapellaReadiness::V2MethodsNotSupported { error } => write!(
                 f,
-                "The execution endpoint does not appear to support \
-                the required engine api methods for Capella: {}",
+                "Execution endpoint does not support Capella methods: {}",
                 error
             ),
         }
@@ -115,12 +107,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }
 
         if all_good {
-            if !el.is_synced_for_notifier().await {
-                // The EL is not synced.
-                CapellaReadiness::NotSynced
-            } else {
-                CapellaReadiness::Ready
-            }
+            CapellaReadiness::Ready
         } else {
             CapellaReadiness::V2MethodsNotSupported {
                 error: missing_methods,
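Note: with `NotSynced` gone, the readiness check above reduces to a single branch on the V2-method probe. A minimal standalone sketch of the resulting shape, with simplified stand-in types (not the real Lighthouse definitions):

```rust
// Stand-in types; only the control flow mirrors the diff.
enum Readiness {
    Ready,
    V2MethodsNotSupported { error: String },
}

fn check(all_good: bool, missing_methods: String) -> Readiness {
    if all_good {
        Readiness::Ready
    } else {
        Readiness::V2MethodsNotSupported {
            error: missing_methods,
        }
    }
}
```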
@@ -2,9 +2,41 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
 use operation_pool::{
     PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
 };
-use slog::{debug, info, Logger};
+use slog::{debug, error, info, Logger};
+use slot_clock::SlotClock;
 use std::sync::Arc;
+use std::time::Duration;
 use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
+use types::{EthSpec, Hash256, Slot};
+
+/// The slot clock isn't usually available before the database is initialized, so we construct a
+/// temporary slot clock by reading the genesis state. It should always exist if the database is
+/// initialized at a prior schema version, however we still handle the lack of genesis state
+/// gracefully.
+fn get_slot_clock<T: BeaconChainTypes>(
+    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
+    log: &Logger,
+) -> Result<Option<T::SlotClock>, Error> {
+    let spec = db.get_chain_spec();
+    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
+        block
+    } else {
+        error!(log, "Missing genesis block");
+        return Ok(None);
+    };
+    let genesis_state =
+        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
+            state
+        } else {
+            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
+            return Ok(None);
+        };
+    Ok(Some(T::SlotClock::new(
+        spec.genesis_slot,
+        Duration::from_secs(genesis_state.genesis_time()),
+        Duration::from_secs(spec.seconds_per_slot),
+    )))
+}
+
 pub fn upgrade_to_v14<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
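Note: the arithmetic a slot clock performs once `get_slot_clock` has recovered the genesis time is simple division. A self-contained sketch under assumed mainnet-style constants (the real code reads these from the chain spec):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Assumed constants for illustration; the real values come from the spec.
const SECONDS_PER_SLOT: u64 = 12;
const SLOTS_PER_EPOCH: u64 = 32;

fn current_epoch(genesis_time: u64) -> Option<u64> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
    // Return `None` before genesis instead of a bogus epoch.
    let slot = now.checked_sub(genesis_time)? / SECONDS_PER_SLOT;
    Some(slot / SLOTS_PER_EPOCH)
}
```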
@@ -41,17 +73,35 @@ pub fn downgrade_from_v14<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // We cannot downgrade from V14 once the Capella fork has been reached because there will
+    // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions
+    // of Lighthouse can't handle that.
+    if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch {
+        let current_epoch = get_slot_clock::<T>(&db, &log)?
+            .and_then(|clock| clock.now())
+            .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
+            .ok_or(Error::SlotClockUnavailableForMigration)?;
+
+        if current_epoch >= capella_fork_epoch {
+            error!(
+                log,
+                "Capella already active: v14+ is mandatory";
+                "current_epoch" => current_epoch,
+                "capella_fork_epoch" => capella_fork_epoch,
+            );
+            return Err(Error::UnableToDowngrade);
+        }
+    }
+
     // Load a V14 op pool and transform it to V12.
-    let PersistedOperationPoolV14 {
+    let PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V14(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
+    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
         op_pool
     } else {
         debug!(log, "Nothing to do, no operation pool stored");
@@ -43,7 +43,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V15 op pool and transform it to V14.
-    let PersistedOperationPoolV15 {
+    let PersistedOperationPoolV15::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
@@ -51,9 +51,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
         voluntary_exits,
         bls_to_execution_changes,
         capella_bls_change_broadcast_indices,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V15(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
+    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
         op_pool
     } else {
         debug!(log, "Nothing to do, no operation pool stored");
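Note: both downgrade functions above gain a turbofish on the destructured struct. This pins the generic parameter at the pattern, so the now-plain `db.get_item(...)` call can infer which stored-item type to decode without matching through the enum wrapper. A toy sketch of the idiom:

```rust
// Toy types; the point is that the turbofish in the *pattern* drives inference.
struct Pool<T> {
    items: Vec<T>,
}

fn get_item<T>() -> Option<Pool<T>> {
    Some(Pool { items: Vec::new() })
}

fn main() {
    // `get_item`'s `T` is inferred as `u64` from the pattern.
    if let Some(Pool::<u64> { items }) = get_item() {
        println!("{} items", items.len());
    }
}
```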
@@ -29,7 +29,7 @@ const TOTAL_LABEL: &str = "total";
 
 /// The validator monitor collects per-epoch data about each monitored validator. Historical data
 /// will be kept around for `HISTORIC_EPOCHS` before it is pruned.
-pub const HISTORIC_EPOCHS: usize = 4;
+pub const HISTORIC_EPOCHS: usize = 10;
 
 /// Once the validator monitor reaches this number of validators it will stop
 /// tracking their metrics/logging individually in an effort to reduce
@@ -45,7 +45,7 @@ pub enum Error {
 
 /// Contains data pertaining to one validator for one epoch.
 #[derive(Default)]
-struct EpochSummary {
+pub struct EpochSummary {
     /*
      * Attestations with a target in the current epoch.
      */
@@ -103,6 +103,12 @@ struct EpochSummary {
     pub proposer_slashings: usize,
     /// The number of attester slashings observed.
     pub attester_slashings: usize,
+
+    /*
+     * Other validator info helpful for the UI.
+     */
+    /// The total balance of the validator.
+    pub total_balance: Option<u64>,
 }
 
 impl EpochSummary {
@@ -176,18 +182,60 @@ impl EpochSummary {
     pub fn register_attester_slashing(&mut self) {
         self.attester_slashings += 1;
     }
+
+    pub fn register_validator_total_balance(&mut self, total_balance: u64) {
+        self.total_balance = Some(total_balance)
+    }
 }
 
 type SummaryMap = HashMap<Epoch, EpochSummary>;
 
+#[derive(Default)]
+pub struct ValidatorMetrics {
+    pub attestation_hits: u64,
+    pub attestation_misses: u64,
+    pub attestation_head_hits: u64,
+    pub attestation_head_misses: u64,
+    pub attestation_target_hits: u64,
+    pub attestation_target_misses: u64,
+}
+
+impl ValidatorMetrics {
+    pub fn increment_hits(&mut self) {
+        self.attestation_hits += 1;
+    }
+
+    pub fn increment_misses(&mut self) {
+        self.attestation_misses += 1;
+    }
+
+    pub fn increment_target_hits(&mut self) {
+        self.attestation_target_hits += 1;
+    }
+
+    pub fn increment_target_misses(&mut self) {
+        self.attestation_target_misses += 1;
+    }
+
+    pub fn increment_head_hits(&mut self) {
+        self.attestation_head_hits += 1;
+    }
+
+    pub fn increment_head_misses(&mut self) {
+        self.attestation_head_misses += 1;
+    }
+}
+
 /// A validator that is being monitored by the `ValidatorMonitor`.
-struct MonitoredValidator {
+pub struct MonitoredValidator {
     /// A human-readable identifier for the validator.
     pub id: String,
     /// The validator index in the state.
     pub index: Option<u64>,
     /// A history of the validator over time.
     pub summaries: RwLock<SummaryMap>,
+    /// Validator metrics to be exposed over the HTTP API.
+    pub metrics: RwLock<ValidatorMetrics>,
 }
 
 impl MonitoredValidator {
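Note: the new `ValidatorMetrics` fields are raw hit/miss tallies; per the doc comment they are meant to be re-exposed over the HTTP API, where a consumer would derive rates from them. An illustrative (hypothetical) helper:

```rust
// Hypothetical helper, not part of the diff: hit rate from two counters.
fn attestation_hit_rate(hits: u64, misses: u64) -> Option<f64> {
    let total = hits + misses;
    (total > 0).then(|| hits as f64 / total as f64)
}

fn main() {
    // e.g. 9 hits and 1 miss over the tracked window.
    println!("{:?}", attestation_hit_rate(9, 1)); // Some(0.9)
}
```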
@@ -198,6 +246,7 @@ impl MonitoredValidator {
                 .unwrap_or_else(|| pubkey.to_string()),
             index,
             summaries: <_>::default(),
+            metrics: <_>::default(),
         }
     }
 
@@ -252,6 +301,20 @@ impl MonitoredValidator {
     fn touch_epoch_summary(&self, epoch: Epoch) {
         self.with_epoch_summary(epoch, |_| {});
     }
+
+    fn get_from_epoch_summary<F, U>(&self, epoch: Epoch, func: F) -> Option<U>
+    where
+        F: Fn(Option<&EpochSummary>) -> Option<U>,
+    {
+        let summaries = self.summaries.read();
+        func(summaries.get(&epoch))
+    }
+
+    pub fn get_total_balance(&self, epoch: Epoch) -> Option<u64> {
+        self.get_from_epoch_summary(epoch, |summary_opt| {
+            summary_opt.and_then(|summary| summary.total_balance)
+        })
+    }
 }
 
 /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P
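Note on the shape of `get_from_epoch_summary`: taking a closure keeps the `RwLock` read guard scoped inside the method, so callers receive a plain value and can never hold the lock. A standalone sketch of the same pattern using std types (names assumed):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

struct Summaries(RwLock<HashMap<u64, u64>>);

impl Summaries {
    // The guard lives only as long as `func` runs.
    fn get_from<F, U>(&self, key: u64, func: F) -> Option<U>
    where
        F: Fn(Option<&u64>) -> Option<U>,
    {
        let map = self.0.read().ok()?;
        func(map.get(&key))
    }
}

fn main() {
    let s = Summaries(RwLock::new(HashMap::from([(1, 32_000_000_000)])));
    println!("{:?}", s.get_from(1, |v| v.copied()));
}
```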
@@ -347,12 +410,20 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             if let Some(i) = monitored_validator.index {
                 monitored_validator.touch_epoch_summary(current_epoch);
 
+                let i = i as usize;
+
+                // Cache relevant validator info.
+                if let Some(balance) = state.balances().get(i) {
+                    monitored_validator.with_epoch_summary(current_epoch, |summary| {
+                        summary.register_validator_total_balance(*balance)
+                    });
+                }
+
                 // Only log the per-validator metrics if it's enabled.
                 if !self.individual_tracking() {
                     continue;
                 }
 
-                let i = i as usize;
                 let id = &monitored_validator.id;
 
                 if let Some(balance) = state.balances().get(i) {
@@ -479,6 +550,25 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 continue;
             }
+
+            // Store some metrics directly to be re-exposed on the HTTP API.
+            let mut validator_metrics = monitored_validator.metrics.write();
+            if previous_epoch_matched_any {
+                validator_metrics.increment_hits();
+                if previous_epoch_matched_target {
+                    validator_metrics.increment_target_hits()
+                } else {
+                    validator_metrics.increment_target_misses()
+                }
+                if previous_epoch_matched_head {
+                    validator_metrics.increment_head_hits()
+                } else {
+                    validator_metrics.increment_head_misses()
+                }
+            } else {
+                validator_metrics.increment_misses()
+            }
+            drop(validator_metrics);
 
             // Indicates if any attestation made it on-chain.
             //
             // For Base states, this will be *any* attestation whatsoever. For Altair states,
@@ -717,6 +807,14 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         self.validators.values().map(|val| val.id.clone()).collect()
     }
 
+    pub fn get_monitored_validator(&self, index: u64) -> Option<&MonitoredValidator> {
+        if let Some(pubkey) = self.indices.get(&index) {
+            self.validators.get(pubkey)
+        } else {
+            None
+        }
+    }
+
     /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
     /// Otherwise, do nothing.
     pub fn auto_register_local_validator(&mut self, validator_index: u64) {
@@ -2,6 +2,7 @@
 
 use beacon_chain::attestation_verification::Error as AttnError;
 use beacon_chain::builder::BeaconChainBuilder;
+use beacon_chain::schema_change::migrate_schema;
 use beacon_chain::test_utils::{
     test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
 };
@@ -24,6 +25,7 @@ use std::collections::HashSet;
 use std::convert::TryInto;
 use std::sync::Arc;
 use std::time::Duration;
+use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
 use store::{
     iter::{BlockRootsIterator, StateRootsIterator},
     HotColdDB, LevelDB, StoreConfig,
@@ -78,6 +80,7 @@ fn get_harness(
     let harness = TestHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
+        .logger(store.logger().clone())
         .fresh_disk_store(store)
         .mock_execution_layer()
         .build();
@@ -2543,6 +2546,91 @@ async fn revert_minority_fork_on_resume() {
     assert_eq!(heads.len(), 1);
 }
 
+// This test checks whether the schema downgrade from the latest version to some minimum supported
+// version is correct. This is the easiest schema test to write without historic versions of
+// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually
+// as old downgrades are deprecated.
+#[tokio::test]
+async fn schema_downgrade_to_min_version() {
+    let num_blocks_produced = E::slots_per_epoch() * 4;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
+    let spec = &harness.chain.spec.clone();
+
+    harness
+        .extend_chain(
+            num_blocks_produced as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    let min_version = if harness.spec.capella_fork_epoch.is_some() {
+        // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that
+        // at all if Capella is enabled.
+        SchemaVersion(14)
+    } else {
+        SchemaVersion(11)
+    };
+
+    // Close the database to ensure everything is written to disk.
+    drop(store);
+    drop(harness);
+
+    // Re-open the store.
+    let store = get_store(&db_path);
+
+    // Downgrade.
+    let deposit_contract_deploy_block = 0;
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        CURRENT_SCHEMA_VERSION,
+        min_version,
+        store.logger().clone(),
+        spec,
+    )
+    .expect("schema downgrade to minimum version should work");
+
+    // Upgrade back.
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        min_version,
+        CURRENT_SCHEMA_VERSION,
+        store.logger().clone(),
+        spec,
+    )
+    .expect("schema upgrade from minimum version should work");
+
+    // Recreate the harness.
+    let harness = BeaconChainHarness::builder(MinimalEthSpec)
+        .default_spec()
+        .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
+        .logger(store.logger().clone())
+        .resumed_disk_store(store.clone())
+        .mock_execution_layer()
+        .build();
+
+    check_finalization(&harness, num_blocks_produced);
+    check_split_slot(&harness, store.clone());
+    check_chain_dump(&harness, num_blocks_produced + 1);
+    check_iterators(&harness);
+
+    // Check that downgrading beyond the minimum version fails (bound is *tight*).
+    let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap());
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        CURRENT_SCHEMA_VERSION,
+        min_version_sub_1,
+        harness.logger().clone(),
+        spec,
+    )
+    .expect_err("should not downgrade below minimum version");
+}
+
 /// Checks that two chains are the same, for the purpose of these tests.
 ///
 /// Several fields that are hard/impossible to check are ignored (e.g., the store).
@@ -39,7 +39,7 @@ time = "0.3.5"
 directory = {path = "../../common/directory"}
 http_api = { path = "../http_api" }
 http_metrics = { path = "../http_metrics" }
-slasher = { path = "../../slasher" }
+slasher = { path = "../../slasher", default-features = false }
 slasher_service = { path = "../../slasher/service" }
 monitoring_api = {path = "../../common/monitoring_api"}
 execution_layer = { path = "../execution_layer" }
@@ -466,13 +466,14 @@ async fn capella_readiness_logging<T: BeaconChainTypes>(
             error!(
                 log,
                 "Not ready for Capella";
+                "hint" => "the execution endpoint may be offline",
                 "info" => %readiness,
-                "hint" => "try updating Lighthouse and/or the execution layer",
             )
         }
         readiness => warn!(
             log,
             "Not ready for Capella";
+            "hint" => "try updating the execution endpoint",
            "info" => %readiness,
         ),
     }
@@ -10,7 +10,7 @@ use serde_json::json;
 use std::collections::HashSet;
 use tokio::sync::Mutex;
 
-use std::time::{Duration, SystemTime};
+use std::time::{Duration, Instant};
 use types::EthSpec;
 
 pub use deposit_log::{DepositLog, Log};
@@ -566,31 +566,23 @@ pub mod deposit_methods {
 #[derive(Clone, Debug)]
 pub struct CapabilitiesCacheEntry {
     engine_capabilities: EngineCapabilities,
-    fetch_time: SystemTime,
+    fetch_time: Instant,
 }
 
 impl CapabilitiesCacheEntry {
     pub fn new(engine_capabilities: EngineCapabilities) -> Self {
         Self {
             engine_capabilities,
-            fetch_time: SystemTime::now(),
+            fetch_time: Instant::now(),
         }
     }
 
-    pub fn engine_capabilities(&self) -> &EngineCapabilities {
-        &self.engine_capabilities
+    pub fn engine_capabilities(&self) -> EngineCapabilities {
+        self.engine_capabilities
     }
 
     pub fn age(&self) -> Duration {
-        // duration_since() may fail because measurements taken earlier
-        // are not guaranteed to always be before later measurements
-        // due to anomalies such as the system clock being adjusted
-        // either forwards or backwards
-        //
-        // In such cases, we'll just say the age is zero
-        SystemTime::now()
-            .duration_since(self.fetch_time)
-            .unwrap_or(Duration::ZERO)
+        Instant::now().duration_since(self.fetch_time)
    }
 
    /// returns `true` if the entry's age is >= age_limit
@@ -841,7 +833,9 @@ impl HttpJsonRpc {
 
         Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge {
             execution_payload: payload_v1.into(),
-            // Have to guess zero here as we don't know the value
+            // Set the V1 payload values from the EE to be zero. This simulates
+            // the pre-block-value functionality of always choosing the builder
+            // block.
             block_value: Uint256::zero(),
         }))
     }
@@ -1055,16 +1049,12 @@ impl HttpJsonRpc {
     ) -> Result<EngineCapabilities, Error> {
         let mut lock = self.engine_capabilities_cache.lock().await;
 
-        if lock
-            .as_ref()
-            .map_or(true, |entry| entry.older_than(age_limit))
-        {
+        if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) {
+            Ok(lock.engine_capabilities())
+        } else {
             let engine_capabilities = self.exchange_capabilities().await?;
             *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities));
             Ok(engine_capabilities)
-        } else {
-            // here entry is guaranteed to exist so unwrap() is safe
-            Ok(*lock.as_ref().unwrap().engine_capabilities())
         }
     }
 
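Note: the cache-read rewrite above is the `Option::filter` idiom: keep the entry only while it passes the freshness test, otherwise fall through to a refetch. A generic toy sketch:

```rust
// Toy cache of u32 values; `fresh` stands in for the age check.
fn cached_or_refetch(cache: &mut Option<u32>, fresh: bool, fetch: impl FnOnce() -> u32) -> u32 {
    if let Some(v) = cache.as_ref().filter(|_| fresh) {
        *v
    } else {
        let v = fetch();
        *cache = Some(v);
        v
    }
}

fn main() {
    let mut cache = None;
    println!("{}", cached_or_refetch(&mut cache, true, || 7)); // miss: fetches 7
    println!("{}", cached_or_refetch(&mut cache, true, || 9)); // hit: still 7
}
```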
@@ -144,7 +144,6 @@ impl<T: EthSpec> From<ExecutionPayloadCapella<T>> for JsonExecutionPayloadV2<T>
             withdrawals: payload
                 .withdrawals
                 .into_iter()
-                .cloned()
                 .map(Into::into)
                 .collect::<Vec<_>>()
                 .into(),
@@ -172,7 +171,6 @@ impl<T: EthSpec> From<ExecutionPayloadEip4844<T>> for JsonExecutionPayloadV3<T>
             withdrawals: payload
                 .withdrawals
                 .into_iter()
-                .cloned()
                 .map(Into::into)
                 .collect::<Vec<_>>()
                 .into(),
@@ -230,7 +228,6 @@ impl<T: EthSpec> From<JsonExecutionPayloadV2<T>> for ExecutionPayloadCapella<T>
             withdrawals: payload
                 .withdrawals
                 .into_iter()
-                .cloned()
                 .map(Into::into)
                 .collect::<Vec<_>>()
                 .into(),
@@ -258,7 +255,6 @@ impl<T: EthSpec> From<JsonExecutionPayloadV3<T>> for ExecutionPayloadEip4844<T>
             withdrawals: payload
                 .withdrawals
                 .into_iter()
-                .cloned()
                 .map(Into::into)
                 .collect::<Vec<_>>()
                 .into(),
@ -17,8 +17,7 @@ use types::ExecutionBlockHash;
|
|||||||
|
|
||||||
/// The number of payload IDs that will be stored for each `Engine`.
|
/// The number of payload IDs that will be stored for each `Engine`.
|
||||||
///
|
///
|
||||||
/// Since the size of each value is small (~100 bytes) a large number is used for safety.
|
/// Since the size of each value is small (~800 bytes) a large number is used for safety.
|
||||||
/// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals
|
|
||||||
const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512;
|
const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512;
|
||||||
const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
|
const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
|
||||||
|
|
||||||
@ -276,7 +275,7 @@ impl Engine {
|
|||||||
|
|
||||||
let mut state = self.state.write().await;
|
let mut state = self.state.write().await;
|
||||||
state.update(EngineStateInternal::AuthFailed);
|
state.update(EngineStateInternal::AuthFailed);
|
||||||
(**state, CapabilitiesCacheAction::None)
|
(**state, CapabilitiesCacheAction::Clear)
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!(
|
error!(
|
||||||
@ -342,7 +341,7 @@ impl Engine {
|
|||||||
/// deadlock.
|
/// deadlock.
|
||||||
pub async fn request<'a, F, G, H>(self: &'a Arc<Self>, func: F) -> Result<H, EngineError>
|
pub async fn request<'a, F, G, H>(self: &'a Arc<Self>, func: F) -> Result<H, EngineError>
|
||||||
where
|
where
|
||||||
F: Fn(&'a Engine) -> G,
|
F: FnOnce(&'a Engine) -> G,
|
||||||
G: Future<Output = Result<H, EngineApiError>>,
|
G: Future<Output = Result<H, EngineApiError>>,
|
||||||
{
|
{
|
||||||
match func(self).await {
|
match func(self).await {
|
||||||
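Note: relaxing the bound from `Fn` to `FnOnce` is what allows the `notify_forkchoice_updated` call site (next hunk) to move `payload_attributes` into the closure instead of cloning through a reference. A minimal sketch of why the weaker bound is more permissive:

```rust
fn call_once<F, R>(f: F) -> R
where
    F: FnOnce() -> R, // `Fn` would reject closures that consume a capture
{
    f()
}

fn main() {
    let attrs = String::from("payload attributes");
    // This closure moves `attrs` out, so it implements only `FnOnce`.
    let out = call_once(move || attrs);
    println!("{out}");
}
```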
@@ -1345,16 +1345,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .set_latest_forkchoice_state(forkchoice_state)
             .await;
 
-        let payload_attributes_ref = &payload_attributes;
         let result = self
             .engine()
             .request(|engine| async move {
                 engine
-                    .notify_forkchoice_updated(
-                        forkchoice_state,
-                        payload_attributes_ref.clone(),
-                        self.log(),
-                    )
+                    .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log())
                     .await
             })
             .await;
@@ -1720,7 +1715,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
                 capella_block
                     .withdrawals
                     .into_iter()
-                    .map(|w| w.into())
+                    .map(Into::into)
                     .collect(),
             )
             .map_err(ApiError::DeserializeWithdrawals)?;
@@ -1747,7 +1742,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
                 eip4844_block
                     .withdrawals
                     .into_iter()
-                    .map(|w| w.into())
+                    .map(Into::into)
                     .collect(),
             )
             .map_err(ApiError::DeserializeWithdrawals)?;
|
@ -36,6 +36,7 @@ tree_hash = "0.4.1"
 sysinfo = "0.26.5"
 system_health = { path = "../../common/system_health" }
 directory = { path = "../../common/directory" }
+eth2_serde_utils = "0.1.1"
 operation_pool = { path = "../operation_pool" }

 [dev-dependencies]
@ -2374,11 +2374,19 @@ pub fn serve<T: BeaconChainTypes>(
         .and(not_while_syncing_filter.clone())
         .and(warp::query::<api_types::ValidatorBlocksQuery>())
         .and(chain_filter.clone())
+        .and(log_filter.clone())
         .and_then(
             |endpoint_version: EndpointVersion,
              slot: Slot,
              query: api_types::ValidatorBlocksQuery,
-             chain: Arc<BeaconChain<T>>| async move {
+             chain: Arc<BeaconChain<T>>,
+             log: Logger| async move {
+                debug!(
+                    log,
+                    "Block production request from HTTP API";
+                    "slot" => slot
+                );
+
                 let randao_reveal = query.randao_reveal.decompress().map_err(|e| {
                     warp_utils::reject::custom_bad_request(format!(
                         "randao reveal is not a valid BLS signature: {:?}",
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// POST lighthouse/ui/validator_info
|
||||||
|
let post_lighthouse_ui_validator_info = warp::path("lighthouse")
|
||||||
|
.and(warp::path("ui"))
|
||||||
|
.and(warp::path("validator_info"))
|
||||||
|
.and(warp::path::end())
|
||||||
|
.and(warp::body::json())
|
||||||
|
.and(chain_filter.clone())
|
||||||
|
.and_then(
|
||||||
|
|request_data: ui::ValidatorInfoRequestData, chain: Arc<BeaconChain<T>>| {
|
||||||
|
blocking_json_task(move || {
|
||||||
|
ui::get_validator_info(request_data, chain)
|
||||||
|
.map(api_types::GenericResponse::from)
|
||||||
|
})
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
// GET lighthouse/syncing
|
// GET lighthouse/syncing
|
||||||
let get_lighthouse_syncing = warp::path("lighthouse")
|
let get_lighthouse_syncing = warp::path("lighthouse")
|
||||||
.and(warp::path("syncing"))
|
.and(warp::path("syncing"))
|
||||||
@ -3660,6 +3684,7 @@ pub fn serve<T: BeaconChainTypes>(
|
|||||||
.or(post_lighthouse_database_historical_blocks.boxed())
|
.or(post_lighthouse_database_historical_blocks.boxed())
|
||||||
.or(post_lighthouse_block_rewards.boxed())
|
.or(post_lighthouse_block_rewards.boxed())
|
||||||
.or(post_lighthouse_ui_validator_metrics.boxed())
|
.or(post_lighthouse_ui_validator_metrics.boxed())
|
||||||
|
.or(post_lighthouse_ui_validator_info.boxed())
|
||||||
.recover(warp_utils::reject::handle_rejection),
|
.recover(warp_utils::reject::handle_rejection),
|
||||||
))
|
))
|
||||||
.recover(warp_utils::reject::handle_rejection)
|
.recover(warp_utils::reject::handle_rejection)
|
||||||
|
@ -5,7 +5,7 @@ use beacon_chain::NotifyExecutionLayer;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
 use lighthouse_network::PubsubMessage;
 use network::NetworkMessage;
-use slog::{error, info, warn, Logger};
+use slog::{debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use tokio::sync::mpsc::UnboundedSender;
|
|||||||
//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
|
//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
|
||||||
//this may skew metrics
|
//this may skew metrics
|
||||||
let block_root = block_root.unwrap_or_else(|| block.canonical_root());
|
let block_root = block_root.unwrap_or_else(|| block.canonical_root());
|
||||||
|
debug!(
|
||||||
|
log,
|
||||||
|
"Signed block published to HTTP API";
|
||||||
|
"slot" => block.slot()
|
||||||
|
);
|
||||||
|
|
||||||
// Send the block, regardless of whether or not it is valid. The API
|
// Send the block, regardless of whether or not it is valid. The API
|
||||||
// specification is very clear that this is the desired behaviour.
|
// specification is very clear that this is the desired behaviour.
|
||||||
|
@ -1,5 +1,7 @@
-use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes};
-use eth2::types::ValidatorStatus;
+use beacon_chain::{
+    validator_monitor::HISTORIC_EPOCHS, BeaconChain, BeaconChainError, BeaconChainTypes,
+};
+use eth2::types::{Epoch, ValidatorStatus};
 use serde::{Deserialize, Serialize};
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Serialize, Deserialize)]
|
||||||
|
pub struct ValidatorInfoRequestData {
|
||||||
|
#[serde(with = "eth2_serde_utils::quoted_u64_vec")]
|
||||||
|
indices: Vec<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Serialize, Deserialize)]
|
||||||
|
pub struct ValidatorInfoValues {
|
||||||
|
#[serde(with = "eth2_serde_utils::quoted_u64")]
|
||||||
|
epoch: u64,
|
||||||
|
#[serde(with = "eth2_serde_utils::quoted_u64")]
|
||||||
|
total_balance: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Serialize, Deserialize)]
|
||||||
|
pub struct ValidatorInfo {
|
||||||
|
info: Vec<ValidatorInfoValues>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Serialize, Deserialize)]
|
||||||
|
pub struct ValidatorInfoResponse {
|
||||||
|
validators: HashMap<String, ValidatorInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_validator_info<T: BeaconChainTypes>(
|
||||||
|
request_data: ValidatorInfoRequestData,
|
||||||
|
chain: Arc<BeaconChain<T>>,
|
||||||
|
) -> Result<ValidatorInfoResponse, warp::Rejection> {
|
||||||
|
let current_epoch = chain.epoch().map_err(beacon_chain_error)?;
|
||||||
|
|
||||||
|
let epochs = current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64();
|
||||||
|
|
||||||
|
let validator_ids = chain
|
||||||
|
.validator_monitor
|
||||||
|
.read()
|
||||||
|
.get_all_monitored_validators()
|
||||||
|
.iter()
|
||||||
|
.cloned()
|
||||||
|
.collect::<HashSet<String>>();
|
||||||
|
|
||||||
|
let indices = request_data
|
||||||
|
.indices
|
||||||
|
.iter()
|
||||||
|
.map(|index| index.to_string())
|
||||||
|
.collect::<HashSet<String>>();
|
||||||
|
|
||||||
|
let ids = validator_ids
|
||||||
|
.intersection(&indices)
|
||||||
|
.collect::<HashSet<&String>>();
|
||||||
|
|
||||||
|
let mut validators = HashMap::new();
|
||||||
|
|
||||||
|
for id in ids {
|
||||||
|
if let Ok(index) = id.parse::<u64>() {
|
||||||
|
if let Some(validator) = chain
|
||||||
|
.validator_monitor
|
||||||
|
.read()
|
||||||
|
.get_monitored_validator(index)
|
||||||
|
{
|
||||||
|
let mut info = vec![];
|
||||||
|
for epoch in epochs.clone() {
|
||||||
|
if let Some(total_balance) = validator.get_total_balance(Epoch::new(epoch)) {
|
||||||
|
info.push(ValidatorInfoValues {
|
||||||
|
epoch,
|
||||||
|
total_balance,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
validators.insert(id.clone(), ValidatorInfo { info });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(ValidatorInfoResponse { validators })
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Serialize, Deserialize)]
|
#[derive(PartialEq, Serialize, Deserialize)]
|
||||||
pub struct ValidatorMetricsRequestData {
|
pub struct ValidatorMetricsRequestData {
|
||||||
indices: Vec<u64>,
|
indices: Vec<u64>,
|
||||||
@ -119,76 +197,56 @@ pub fn post_validator_monitor_metrics<T: BeaconChainTypes>(
|
|||||||
let mut validators = HashMap::new();
|
let mut validators = HashMap::new();
|
||||||
|
|
||||||
for id in ids {
|
for id in ids {
|
||||||
let attestation_hits = metrics::get_int_counter(
|
if let Ok(index) = id.parse::<u64>() {
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT,
|
if let Some(validator) = chain
|
||||||
&[id],
|
.validator_monitor
|
||||||
)
|
.read()
|
||||||
.map(|counter| counter.get())
|
.get_monitored_validator(index)
|
||||||
.unwrap_or(0);
|
{
|
||||||
let attestation_misses = metrics::get_int_counter(
|
let val_metrics = validator.metrics.read();
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS,
|
let attestation_hits = val_metrics.attestation_hits;
|
||||||
&[id],
|
let attestation_misses = val_metrics.attestation_misses;
|
||||||
)
|
let attestation_head_hits = val_metrics.attestation_head_hits;
|
||||||
.map(|counter| counter.get())
|
let attestation_head_misses = val_metrics.attestation_head_misses;
|
||||||
.unwrap_or(0);
|
let attestation_target_hits = val_metrics.attestation_target_hits;
|
||||||
let attestations = attestation_hits + attestation_misses;
|
let attestation_target_misses = val_metrics.attestation_target_misses;
|
||||||
let attestation_hit_percentage: f64 = if attestations == 0 {
|
drop(val_metrics);
|
||||||
0.0
|
|
||||||
} else {
|
|
||||||
(100 * attestation_hits / attestations) as f64
|
|
||||||
};
|
|
||||||
|
|
||||||
let attestation_head_hits = metrics::get_int_counter(
|
let attestations = attestation_hits + attestation_misses;
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT,
|
let attestation_hit_percentage: f64 = if attestations == 0 {
|
||||||
&[id],
|
0.0
|
||||||
)
|
} else {
|
||||||
.map(|counter| counter.get())
|
(100 * attestation_hits / attestations) as f64
|
||||||
.unwrap_or(0);
|
};
|
||||||
let attestation_head_misses = metrics::get_int_counter(
|
let head_attestations = attestation_head_hits + attestation_head_misses;
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS,
|
let attestation_head_hit_percentage: f64 = if head_attestations == 0 {
|
||||||
&[id],
|
0.0
|
||||||
)
|
} else {
|
||||||
.map(|counter| counter.get())
|
(100 * attestation_head_hits / head_attestations) as f64
|
||||||
.unwrap_or(0);
|
};
|
||||||
let head_attestations = attestation_head_hits + attestation_head_misses;
|
|
||||||
let attestation_head_hit_percentage: f64 = if head_attestations == 0 {
|
|
||||||
0.0
|
|
||||||
} else {
|
|
||||||
(100 * attestation_head_hits / head_attestations) as f64
|
|
||||||
};
|
|
||||||
|
|
||||||
let attestation_target_hits = metrics::get_int_counter(
|
let target_attestations = attestation_target_hits + attestation_target_misses;
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT,
|
let attestation_target_hit_percentage: f64 = if target_attestations == 0 {
|
||||||
&[id],
|
0.0
|
||||||
)
|
} else {
|
||||||
.map(|counter| counter.get())
|
(100 * attestation_target_hits / target_attestations) as f64
|
||||||
.unwrap_or(0);
|
};
|
||||||
let attestation_target_misses = metrics::get_int_counter(
|
|
||||||
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS,
|
|
||||||
&[id],
|
|
||||||
)
|
|
||||||
.map(|counter| counter.get())
|
|
||||||
.unwrap_or(0);
|
|
||||||
let target_attestations = attestation_target_hits + attestation_target_misses;
|
|
||||||
let attestation_target_hit_percentage: f64 = if target_attestations == 0 {
|
|
||||||
0.0
|
|
||||||
} else {
|
|
||||||
(100 * attestation_target_hits / target_attestations) as f64
|
|
||||||
};
|
|
||||||
|
|
||||||
let metrics = ValidatorMetrics {
|
let metrics = ValidatorMetrics {
|
||||||
attestation_hits,
|
attestation_hits,
|
||||||
attestation_misses,
|
attestation_misses,
|
||||||
attestation_hit_percentage,
|
attestation_hit_percentage,
|
||||||
attestation_head_hits,
|
attestation_head_hits,
|
||||||
attestation_head_misses,
|
attestation_head_misses,
|
||||||
attestation_head_hit_percentage,
|
attestation_head_hit_percentage,
|
||||||
attestation_target_hits,
|
attestation_target_hits,
|
||||||
attestation_target_misses,
|
attestation_target_misses,
|
||||||
attestation_target_hit_percentage,
|
attestation_target_hit_percentage,
|
||||||
};
|
};
|
||||||
|
|
||||||
validators.insert(id.clone(), metrics);
|
validators.insert(id.clone(), metrics);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(ValidatorMetricsResponse { validators })
|
Ok(ValidatorMetricsResponse { validators })
|
||||||
|
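A side note on the percentage computation kept on both sides of this hunk: `(100 * attestation_hits / attestations) as f64` performs integer division before the cast, so results are truncated to whole percentage points. A small sketch of the behaviour, and of a fractional variant should that precision ever be wanted:

```rust
fn main() {
    // Integer division happens first: 2 hits out of 3 yields 66.0, not 66.66…
    let truncated = (100 * 2u64 / 3u64) as f64;
    assert_eq!(truncated, 66.0);

    // Casting before dividing keeps the fractional part.
    let precise = 100.0 * 2u64 as f64 / 3u64 as f64;
    assert!((precise - 66.6667).abs() < 1e-3);
}
```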
@ -116,7 +116,13 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(|ctx: Arc<Context<T>>| async move {
             Ok::<_, warp::Rejection>(
                 metrics::gather_prometheus_metrics(&ctx)
-                    .map(|body| Response::builder().status(200).body(body).unwrap())
+                    .map(|body| {
+                        Response::builder()
+                            .status(200)
+                            .header("Content-Type", "text/plain")
+                            .body(body)
+                            .unwrap()
+                    })
                     .unwrap_or_else(|e| {
                         Response::builder()
                             .status(500)
@ -1,6 +1,7 @@
 use beacon_chain::test_utils::EphemeralHarnessType;
 use environment::null_logger;
 use http_metrics::Config;
+use reqwest::header::HeaderValue;
 use reqwest::StatusCode;
 use std::net::{IpAddr, Ipv4Addr};
 use std::sync::Arc;
|
|||||||
listening_socket.port()
|
listening_socket.port()
|
||||||
);
|
);
|
||||||
|
|
||||||
assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
|
let response = reqwest::get(&url).await.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(response.status(), StatusCode::OK);
|
||||||
|
assert_eq!(
|
||||||
|
response.headers().get("Content-Type").unwrap(),
|
||||||
|
&HeaderValue::from_str("text/plain").unwrap()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
@ -27,6 +27,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
 smallvec = "1.6.1"
 tokio-io-timeout = "1.1.1"
 lru = "0.7.1"
+lru_cache = { path = "../../common/lru_cache" }
 parking_lot = "0.12.0"
 sha2 = "0.10"
 snap = "1.0.1"
@ -8,6 +8,7 @@ use crate::{Subnet, SubnetDiscovery};
 use delay_map::HashSetDelay;
 use discv5::Enr;
 use libp2p::identify::Info as IdentifyInfo;
+use lru_cache::LRUTimeCache;
 use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult};
 use rand::seq::SliceRandom;
 use slog::{debug, error, trace, warn};
|
|||||||
/// requests. This defines the interval in seconds.
|
/// requests. This defines the interval in seconds.
|
||||||
const HEARTBEAT_INTERVAL: u64 = 30;
|
const HEARTBEAT_INTERVAL: u64 = 30;
|
||||||
|
|
||||||
|
/// The minimum amount of time we allow peers to reconnect to us after a disconnect when we are
|
||||||
|
/// saturated with peers. This effectively looks like a swarm BAN for this amount of time.
|
||||||
|
pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600);
|
||||||
/// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would
|
/// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would
|
||||||
/// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet
|
/// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet
|
||||||
/// peers.
|
/// peers.
|
||||||
@ -74,6 +78,20 @@ pub struct PeerManager<TSpec: EthSpec> {
|
|||||||
target_peers: usize,
|
target_peers: usize,
|
||||||
/// Peers queued to be dialed.
|
/// Peers queued to be dialed.
|
||||||
peers_to_dial: VecDeque<(PeerId, Option<Enr>)>,
|
peers_to_dial: VecDeque<(PeerId, Option<Enr>)>,
|
||||||
|
/// The number of temporarily banned peers. This is used to prevent instantaneous
|
||||||
|
/// reconnection.
|
||||||
|
// NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected. A
|
||||||
|
// peer can be in a disconnected state and new connections will be refused and logged as if the
|
||||||
|
// peer is banned without it being reflected in the peer's state.
|
||||||
|
// Also the banned state can out-last the peer's reference in the peer db. So peers that are
|
||||||
|
// unknown to us can still be temporarily banned. This is fundamentally a relationship with
|
||||||
|
// the swarm. Regardless of our knowledge of the peer in the db, it will be temporarily banned
|
||||||
|
// at the swarm layer.
|
||||||
|
// NOTE: An LRUTimeCache is used compared to a structure that needs to be polled to avoid very
|
||||||
|
// frequent polling to unban peers. Instead, this cache piggy-backs the PeerManager heartbeat
|
||||||
|
// to update and clear the cache. Therefore the PEER_RECONNECTION_TIMEOUT only has a resolution
|
||||||
|
// of the HEARTBEAT_INTERVAL.
|
||||||
|
temporary_banned_peers: LRUTimeCache<PeerId>,
|
||||||
/// A collection of sync committee subnets that we need to stay subscribed to.
|
/// A collection of sync committee subnets that we need to stay subscribed to.
|
||||||
/// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run
|
/// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run
|
||||||
/// discovery queries for subnet peers if we disconnect from existing sync
|
/// discovery queries for subnet peers if we disconnect from existing sync
|
||||||
@ -143,6 +161,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)),
|
outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)),
|
||||||
status_peers: HashSetDelay::new(Duration::from_secs(status_interval)),
|
status_peers: HashSetDelay::new(Duration::from_secs(status_interval)),
|
||||||
target_peers: target_peer_count,
|
target_peers: target_peer_count,
|
||||||
|
temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT),
|
||||||
sync_committee_subnets: Default::default(),
|
sync_committee_subnets: Default::default(),
|
||||||
heartbeat,
|
heartbeat,
|
||||||
discovery_enabled,
|
discovery_enabled,
|
||||||
@ -243,6 +262,15 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
reason: Option<GoodbyeReason>,
|
reason: Option<GoodbyeReason>,
|
||||||
) {
|
) {
|
||||||
match ban_operation {
|
match ban_operation {
|
||||||
|
BanOperation::TemporaryBan => {
|
||||||
|
// The peer could be temporarily banned. We only do this in the case that
|
||||||
|
// we have currently reached our peer target limit.
|
||||||
|
if self.network_globals.connected_peers() >= self.target_peers {
|
||||||
|
// We have enough peers, prevent this reconnection.
|
||||||
|
self.temporary_banned_peers.raw_insert(*peer_id);
|
||||||
|
self.events.push(PeerManagerEvent::Banned(*peer_id, vec![]));
|
||||||
|
}
|
||||||
|
}
|
||||||
BanOperation::DisconnectThePeer => {
|
BanOperation::DisconnectThePeer => {
|
||||||
// The peer was currently connected, so we start a disconnection.
|
// The peer was currently connected, so we start a disconnection.
|
||||||
// Once the peer has disconnected, its connection state will transition to a
|
// Once the peer has disconnected, its connection state will transition to a
|
||||||
@ -259,6 +287,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
BanOperation::ReadyToBan(banned_ips) => {
|
BanOperation::ReadyToBan(banned_ips) => {
|
||||||
// The peer is not currently connected, we can safely ban it at the swarm
|
// The peer is not currently connected, we can safely ban it at the swarm
|
||||||
// level.
|
// level.
|
||||||
|
|
||||||
|
// If a peer is being banned, this trumps any temporary ban the peer might be
|
||||||
|
// under. We no longer track it in the temporary ban list.
|
||||||
|
self.temporary_banned_peers.raw_remove(peer_id);
|
||||||
|
|
||||||
// Inform the Swarm to ban the peer
|
// Inform the Swarm to ban the peer
|
||||||
self.events
|
self.events
|
||||||
.push(PeerManagerEvent::Banned(*peer_id, banned_ips));
|
.push(PeerManagerEvent::Banned(*peer_id, banned_ips));
|
||||||
@ -1121,6 +1154,14 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         }
     }

+    /// Unbans any temporarily banned peers that have served their timeout.
+    fn unban_temporary_banned_peers(&mut self) {
+        for peer_id in self.temporary_banned_peers.remove_expired() {
+            self.events
+                .push(PeerManagerEvent::UnBanned(peer_id, Vec::new()));
+        }
+    }
+
     /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations.
     ///
     /// It will request discovery queries if the peer count has not reached the desired number of
|
|||||||
// Prune any excess peers back to our target in such a way that incentivises good scores and
|
// Prune any excess peers back to our target in such a way that incentivises good scores and
|
||||||
// a uniform distribution of subnets.
|
// a uniform distribution of subnets.
|
||||||
self.prune_excess_peers();
|
self.prune_excess_peers();
|
||||||
|
|
||||||
|
// Unban any peers that have served their temporary ban timeout
|
||||||
|
self.unban_temporary_banned_peers();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update metrics related to peer scoring.
|
// Update metrics related to peer scoring.
|
||||||
|
@ -170,7 +170,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             BanResult::NotBanned => {}
         }

-        // Count dialing peers in the limit if the peer dialied us.
+        // Count dialing peers in the limit if the peer dialed us.
         let count_dialing = endpoint.is_listener();
         // Check the connection limits
         if self.peer_limit_reached(count_dialing)
@ -844,8 +844,16 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
                     .collect::<Vec<_>>();
                 return Some(BanOperation::ReadyToBan(banned_ips));
             }
-            PeerConnectionStatus::Disconnecting { .. }
-            | PeerConnectionStatus::Unknown
+            PeerConnectionStatus::Disconnecting { .. } => {
+                // The peer has been disconnected but not banned. Inform the peer manager
+                // that this peer could be eligible for a temporary ban.
+                self.disconnected_peers += 1;
+                info.set_connection_status(PeerConnectionStatus::Disconnected {
+                    since: Instant::now(),
+                });
+                return Some(BanOperation::TemporaryBan);
+            }
+            PeerConnectionStatus::Unknown
             | PeerConnectionStatus::Connected { .. }
             | PeerConnectionStatus::Dialing { .. } => {
                 self.disconnected_peers += 1;
|
|||||||
|
|
||||||
/// When attempting to ban a peer provides the peer manager with the operation that must be taken.
|
/// When attempting to ban a peer provides the peer manager with the operation that must be taken.
|
||||||
pub enum BanOperation {
|
pub enum BanOperation {
|
||||||
|
/// Optionally temporarily ban this peer to prevent instantaneous reconnection.
|
||||||
|
/// The peer manager will decide if temporary banning is required.
|
||||||
|
TemporaryBan,
|
||||||
// The peer is currently connected. Perform a graceful disconnect before banning at the swarm
|
// The peer is currently connected. Perform a graceful disconnect before banning at the swarm
|
||||||
// level.
|
// level.
|
||||||
DisconnectThePeer,
|
DisconnectThePeer,
|
||||||
|
@ -22,8 +22,9 @@ use tokio_util::{
 };
 use types::BlobsSidecar;
 use types::{
-    BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, EthSpec,
-    ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock,
+    BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge,
+    EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature,
+    SignedBeaconBlock,
 };

 lazy_static! {
|
|||||||
.as_ssz_bytes()
|
.as_ssz_bytes()
|
||||||
.len();
|
.len();
|
||||||
|
|
||||||
|
pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
|
||||||
|
BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())),
|
||||||
|
Signature::empty(),
|
||||||
|
)
|
||||||
|
.as_ssz_bytes()
|
||||||
|
.len();
|
||||||
|
|
||||||
/// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing.
|
/// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing.
|
||||||
/// We calculate the value from its fields instead of constructing the block and checking the length.
|
/// We calculate the value from its fields instead of constructing the block and checking the length.
|
||||||
/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network
|
/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network
|
||||||
@ -72,11 +80,11 @@ lazy_static! {
|
|||||||
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_merge_size() // adding max size of execution payload (~16gb)
|
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_merge_size() // adding max size of execution payload (~16gb)
|
||||||
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
|
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
|
||||||
|
|
||||||
pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX
|
pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
|
||||||
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_capella_size() // adding max size of execution payload (~16gb)
|
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_capella_size() // adding max size of execution payload (~16gb)
|
||||||
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
|
+ ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field
|
||||||
|
|
||||||
pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX
|
pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD
|
||||||
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb)
|
+ types::ExecutionPayload::<MainnetEthSpec>::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb)
|
||||||
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
|
+ ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload`
|
||||||
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
|
+ (<types::KzgCommitment as Encode>::ssz_fixed_len() * <MainnetEthSpec>::max_blobs_per_block())
|
||||||
|
@ -23,9 +23,8 @@ pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change";
 pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
 pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";

-pub const CORE_TOPICS: [GossipKind; 8] = [
+pub const CORE_TOPICS: [GossipKind; 7] = [
     GossipKind::BeaconBlock,
-    GossipKind::BeaconBlocksAndBlobsSidecar,
     GossipKind::BeaconAggregateAndProof,
     GossipKind::VoluntaryExit,
     GossipKind::ProposerSlashing,
@ -1551,7 +1551,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
             gossip_block_queue.len() as i64,
         );
         metrics::set_gauge(
-            &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL,
+            &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL,
             rpc_block_queue.len() as i64,
         );
         metrics::set_gauge(
@ -182,6 +182,7 @@ mod attestation_service {
     #[cfg(feature = "deterministic_long_lived_attnets")]
     use std::collections::HashSet;

+    #[cfg(not(windows))]
     use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD;

     use super::*;
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Test to verify that we are not unsubscribing to a subnet before a required subscription.
|
/// Test to verify that we are not unsubscribing to a subnet before a required subscription.
|
||||||
|
#[cfg(not(windows))]
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_same_subnet_unsubscription() {
|
async fn test_same_subnet_unsubscription() {
|
||||||
// subscription config
|
// subscription config
|
||||||
@ -513,6 +515,7 @@ mod attestation_service {
|
|||||||
assert_eq!(unexpected_msg_count, 0);
|
assert_eq!(unexpected_msg_count, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(windows))]
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_subscribe_same_subnet_several_slots_apart() {
|
async fn test_subscribe_same_subnet_several_slots_apart() {
|
||||||
// subscription config
|
// subscription config
|
||||||
|
@ -242,6 +242,20 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV14<T> {
     }
 }

+impl<T: EthSpec> StoreItem for PersistedOperationPoolV15<T> {
+    fn db_column() -> DBColumn {
+        DBColumn::OpPool
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
+        PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into)
+    }
+}
+
 /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`.
 impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
     fn db_column() -> DBColumn {
@ -46,9 +46,8 @@ pub enum Error {
     },
     BlockReplayError(BlockReplayError),
     AddPayloadLogicError,
-    ResyncRequiredForExecutionPayloadSeparation,
     SlotClockUnavailableForMigration,
-    V9MigrationFailure(Hash256),
+    UnableToDowngrade,
     InconsistentFork(InconsistentFork),
 }

@ -1340,6 +1340,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         &self.spec
     }

+    /// Get a reference to the `Logger` used by the database.
+    pub fn logger(&self) -> &Logger {
+        &self.log
+    }
+
     /// Fetch a copy of the current split slot from memory.
     pub fn get_split_slot(&self) -> Slot {
         self.split.read_recursive().slot
@ -33,6 +33,11 @@
 * [Authorization Header](./api-vc-auth-header.md)
 * [Signature Header](./api-vc-sig-header.md)
 * [Prometheus Metrics](./advanced_metrics.md)
+* [Lighthouse UI (Siren)](./lighthouse-ui.md)
+  * [Installation](./ui-installation.md)
+  * [Configuration](./ui-configuration.md)
+  * [Usage](./ui-usage.md)
+  * [FAQs](./ui-faqs.md)
 * [Advanced Usage](./advanced.md)
 * [Checkpoint Sync](./checkpoint-sync.md)
 * [Custom Data Directories](./advanced-datadir.md)
@ -26,10 +26,16 @@ validator client or the slasher**.
 | v3.1.0 | Sep 2022 | v12 | yes |
 | v3.2.0 | Oct 2022 | v12 | yes |
 | v3.3.0 | Nov 2022 | v13 | yes |
+| v3.4.0 | Jan 2023 | v13 | yes |
+| v3.5.0 | Feb 2023 | v15 | yes before Capella |

 > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
 > (e.g. v2.3.0).

+> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We
+> usually do this after a major version has been out for a while and everyone has upgraded. In this
+> case the above table will continue to record the deprecated schema changes for reference.
+
 ## How to apply a database downgrade

 To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters.
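For example, a downgrade from the v15 schema back to v13 might look like the following (the flags shown are illustrative; the surrounding chapter documents the exact invocation for your setup):

```
$ lighthouse db migrate --to 13 --datadir ~/.lighthouse/mainnet --network mainnet
```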
BIN book/src/imgs/ui-account-earnings.png (new file, 866 KiB)
BIN book/src/imgs/ui-balance-modal.png (new file, 43 KiB)
BIN book/src/imgs/ui-configuration.png (new file, 108 KiB)
BIN book/src/imgs/ui-dashboard.png (new file, 1.4 MiB)
BIN book/src/imgs/ui-device.png (new file, 56 KiB)
BIN book/src/imgs/ui-hardware.png (new file, 71 KiB)
BIN book/src/imgs/ui-settings.png (new file, 346 KiB)
BIN book/src/imgs/ui-validator-balance1.png (new file, 66 KiB)
BIN book/src/imgs/ui-validator-balance2.png (new file, 89 KiB)
BIN book/src/imgs/ui-validator-management.png (new file, 383 KiB)
BIN book/src/imgs/ui-validator-modal.png (new file, 333 KiB)
BIN book/src/imgs/ui-validator-table.png (new file, 124 KiB)
BIN book/src/imgs/ui.png (new file, 364 KiB)
@ -133,6 +133,15 @@ Commonly used features include:
 * `slasher-lmdb`: support for the LMDB slasher backend.
 * `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS.
   Not supported on Windows.
+* `spec-minimal`: support for the minimal preset (useful for testing).
+
+Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features`
+argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable.
+E.g.
+
+```
+CARGO_INSTALL_EXTRA_FLAGS="--no-default-features" make
+```
+
 [jemalloc]: https://jemalloc.net/
33 book/src/lighthouse-ui.md (new file)
@ -0,0 +1,33 @@
+# Lighthouse UI (Siren)
+
+_Documentation for Siren users and developers._
+
+[![Chat Badge]][Chat Link]
+
+[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
+[Chat Link]: https://discord.gg/cyAszAh
+
+![ui-overview](./imgs/ui.png)
+
+Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and
+a Lighthouse Validator Client to monitor performance and display key validator
+metrics.
+
+The UI is currently in active development. It resides in the
+[Siren](https://github.com/sigp/siren) repository.
+
+## Topics
+
+See the following Siren specific topics for more context-specific
+information:
+
+- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI.
+- [Configuration Guide](./ui-configuration.md) - Explanation of how to set up
+  and configure Siren.
+- [Usage](./ui-usage.md) - Details various Siren components.
+- [FAQs](./ui-faqs.md) - Frequently Asked Questions.
+
+## Contributing
+
+If you find an issue or bug or would otherwise like to help out with the
+development of the Siren project, please submit issues and PRs to the [Siren](https://github.com/sigp/siren) repository.
47 book/src/ui-configuration.md (new file)
@ -0,0 +1,47 @@
+# Configuration
+
+Siren requires a connection to both a Lighthouse Validator Client
+and a Lighthouse Beacon Node. Upon running you will first be greeted by the
+following configuration screen.
+
+![ui-configuration](./imgs/ui-configuration.png)
+
+## Connecting to the Clients
+
+This allows you to enter the address and ports of the associated Lighthouse
+Beacon node and Lighthouse Validator client.
+
+> The Beacon Node must be run with the `--gui` flag set. To allow the browser
+> to access the node beyond your local computer you also need to allow CORS in
+> the http API. This can be done via `--http-allow-origin "*"`.
+
+A green tick will appear once Siren is able to connect to both clients. You
+can specify different ports for each client by clicking on the advanced tab.
+
+## API Token
+
+The API Token is a secret key that allows you to connect to the validator
+client. The validator client's HTTP API is guarded by this key because it
+contains sensitive validator information and the ability to modify
+validators. Please see [`Validator Authorization`](./api-vc-auth-header.md)
+for further details.
+
+Siren requires this token in order to connect to the Validator client.
+The token is located in the default data directory of the validator
+client. The default path is
+`~/.lighthouse/<network>/validators/api-token.txt`.
+
+The contents of this file for the desired validator client need to be
+entered.
+
+## Name
+
+This is your name; it can be modified and is solely used for aesthetics.
+
+## Device
+
+This is a name that can be associated with the validator client/beacon
+node pair. Multiple such pairs can be remembered for quick swapping between
+them.
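Combining the two flags quoted above, a beacon node prepared for Siren access from another machine might be started as follows (all other flags omitted for brevity):

```
$ lighthouse bn --gui --http-allow-origin "*"
```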
13 book/src/ui-faqs.md (new file)
@ -0,0 +1,13 @@
+# Frequently Asked Questions
+
+## 1. Where can I find my API token?
+The required API token may be found in the default data directory of the validator client. For more information please refer to the Lighthouse UI configuration [`API token section`](./ui-configuration.md#api-token).
+
+## 2. How do I fix the Node Network Errors?
+If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the Lighthouse UI configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients).
+
+## 3. How do I change my Beacon or Validator address after logging in?
+Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes.
+
+## 4. Why doesn't my validator balance graph show any data?
+If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20 minutes.
103 book/src/ui-installation.md (new file)
@ -0,0 +1,103 @@
+# 📦 Installation
+
+Siren runs on Linux, MacOS and Windows.
+
+## Pre-Built Electron Packages
+
+There are pre-compiled electron packages for each operating system which can
+be downloaded and executed. These can be found on the
+[releases](https://github.com/sigp/siren/releases) page of the
+Siren repository.
+
+Simply download the package specific to your operating system and run it.
+
+## Building From Source
+
+### Requirements
+
+Building from source requires `Node v18` and `yarn`.
+
+### Building From Source
+
+The electron app can be built from source by first cloning the repository and
+entering the directory:
+
+```
+$ git clone https://github.com/sigp/siren.git
+$ cd siren
+```
+
+Once cloned, the electron app can be built and run via the Makefile by:
+
+```
+$ make
+```
+
+Alternatively it can be built via:
+
+```
+$ yarn
+```
+
+Once completed successfully the electron app can be run via:
+
+```
+$ yarn dev
+```
+
+### Running In The Browser
+
+#### Docker (Recommended)
+
+Docker is the recommended way to run a webserver that hosts Siren and can be
+connected to via a web browser. We recommend this method as it establishes a
+production-grade web-server to host the application.
+
+`docker` is required to be installed with the service running.
+
+The docker image can be built and run via the Makefile by running:
+```
+$ make docker
+```
+
+Alternatively, to run with Docker, the image needs to be built. From the repository directory
+run:
+```
+$ docker build -t siren .
+```
+
+Then to run the image:
+```
+$ docker run --rm -ti --name siren -p 80:80 siren
+```
+
+This will open port 80 and allow your browser to connect. You can choose
+another local port by modifying the command. For example `-p 8000:80` will open
+port 8000.
+
+To view Siren, simply go to `http://localhost` in your web browser.
+
+#### Development Server
+
+A development server can also be built which will expose a local port 3000 via:
+```
+$ yarn start
+```
+
+Once executed, you can direct your web browser to the following URL to interact
+with the app:
+```
+http://localhost:3000
+```
+
+A production version of the app can be built via
+```
+$ yarn build
+```
+and then further hosted via a production web server.
+
+### Known Issues
+
+If you experience any issues in running the UI please create an issue on the
+[Lighthouse UI](https://github.com/sigp/lighthouse-ui) repository.
61 book/src/ui-usage.md (new file)
@ -0,0 +1,61 @@
+# Usage
+
+# Dashboard
+
+Siren's dashboard view provides a summary of all performance and key validator metrics. Sync statuses, uptimes, accumulated rewards, hardware and network metrics are all consolidated on the dashboard for evaluation.
+
+![](imgs/ui-dashboard.png)
+
+## Account Earnings
+
+The account earnings component accumulates reward data from all registered validators providing a summation of total rewards earned while staking. Given current conversion rates, this component also converts your balance into your selected fiat currency.
+
+Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific timeframe based on current device and network conditions.
+
+![](imgs/ui-account-earnings.png)
+
+## Validator Table
+
+The validator table component is a list of all registered validators, which includes data such as name, index, total balance, earned rewards and current status. Each validator row also contains a link to a detailed data modal and additional data provided by [Beaconcha.in](https://beaconcha.in).
+
+![](imgs/ui-validator-table.png)
+
+## Validator Balance Chart
+
+The validator balance component is a graphical representation of each validator balance over the latest 10 epochs. Take note that only active validators are rendered in the chart visualization.
+
+![](imgs/ui-validator-balance1.png)
+
+By clicking on the chart component you can filter selected validators in the render. This allows for greater resolution in the rendered visualization.
+
+<img src="imgs/ui-balance-modal.png" width="48%" style="display: inline; float: left; margin-right: 4%"/>
+
+<img src="imgs/ui-validator-balance2.png" width="48%"/>
+
+## Hardware Usage and Device Diagnostics
+
+The hardware usage component gathers information about the device the Beacon Node is currently running on. It displays the Disk usage, CPU metrics and memory usage of the Beacon Node device. The device diagnostics component provides the sync status of the execution client and beacon node.
+
+<img height="350" src="imgs/ui-hardware.png" style="display: inline; float: left; margin-right: 25px"/>
+
+<img height="350" src="imgs/ui-device.png"/>
+
+# Validator Management
+
+Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators. Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator.
+
+![](imgs/ui-validator-management.png)
+
+## Validator Modal
+
+Clicking the validator icon activates a detailed validator modal component. This component also allows users to trigger validator actions, as well as to view and update validator graffiti. Each modal contains the validator total income with hourly, daily and weekly earnings estimates.
+
+<img height="450" src="imgs/ui-validator-modal.png"/>
+
+# Settings
+
+Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters.
+
+![](imgs/ui-settings.png)
@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "3.4.0"
+version = "3.5.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"

@ -307,6 +307,5 @@ define_hardcoded_nets!(
         // Set to `true` if the genesis state can be found in the `built_in_network_configs`
         // directory.
         GENESIS_STATE_IS_KNOWN
-    ),
-    (eip4844, "eip4844", GENESIS_STATE_IS_KNOWN)
+    )
 );
@ -1 +0,0 @@
-0
@ -28,6 +28,10 @@ TERMINAL_TOTAL_DIFFICULTY: 17000000000000000
 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615

+# Capella
+CAPELLA_FORK_VERSION: 0x90000072
+CAPELLA_FORK_EPOCH: 56832
+
 # Eip4844
 EIP4844_FORK_VERSION: 0x03001020
 EIP4844_FORK_EPOCH: 18446744073709551615
@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
         // NOTE: using --match instead of --exclude for compatibility with old Git
         "--match=thiswillnevermatchlol"
     ],
-    prefix = "Lighthouse/v3.4.0-",
-    fallback = "Lighthouse/v3.4.0"
+    prefix = "Lighthouse/v3.5.0-",
+    fallback = "Lighthouse/v3.5.0"
 );

 /// Returns `VERSION`, but with platform information appended to the end.
@ -31,6 +31,77 @@ where
     }
 }

+    /// Inserts a key without removal of potentially expired elements.
+    /// Returns true if the key does not already exist.
+    pub fn raw_insert(&mut self, key: Key) -> bool {
+        // check the cache before removing elements
+        let is_new = self.map.insert(key.clone());
+
+        // add the new key to the list, if it doesn't already exist.
+        if is_new {
+            self.list.push_back(Element {
+                key,
+                inserted: Instant::now(),
+            });
+        } else {
+            let position = self
+                .list
+                .iter()
+                .position(|e| e.key == key)
+                .expect("Key is not new");
+            let mut element = self
+                .list
+                .remove(position)
+                .expect("Position is not occupied");
+            element.inserted = Instant::now();
+            self.list.push_back(element);
+        }
+        #[cfg(test)]
+        self.check_invariant();
+        is_new
+    }
+
+    /// Removes a key from the cache without purging expired elements. Returns true if the key
+    /// existed.
+    pub fn raw_remove(&mut self, key: &Key) -> bool {
+        if self.map.remove(key) {
+            let position = self
+                .list
+                .iter()
+                .position(|e| &e.key == key)
+                .expect("Key must exist");
+            self.list
+                .remove(position)
+                .expect("Position is not occupied");
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Removes all expired elements and returns them
+    pub fn remove_expired(&mut self) -> Vec<Key> {
+        if self.list.is_empty() {
+            return Vec::new();
+        }
+
+        let mut removed_elements = Vec::new();
+        let now = Instant::now();
+        // remove any expired results
+        while let Some(element) = self.list.pop_front() {
+            if element.inserted + self.ttl > now {
+                self.list.push_front(element);
+                break;
+            }
+            self.map.remove(&element.key);
+            removed_elements.push(element.key);
+        }
+        #[cfg(test)]
+        self.check_invariant();
+
+        removed_elements
+    }
+
 // Inserts a new key. It first purges expired elements to do so.
 //
 // If the key was not present this returns `true`. If the value was already present this
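A small usage sketch of the three methods added above (this assumes `LRUTimeCache::new(ttl)` as used by the peer manager earlier in this diff, a `Clone + Eq + Hash` key type, and the crate being in scope; timings are exaggerated for clarity):

```rust
use std::time::Duration;

fn main() {
    // Stand-in TTL for PEER_RECONNECTION_TIMEOUT.
    let mut cache = LRUTimeCache::new(Duration::from_millis(100));

    assert!(cache.raw_insert("peer_a")); // new key
    assert!(!cache.raw_insert("peer_a")); // re-insert only refreshes the timestamp

    // Explicit removal, as done when a full ban supersedes a temporary one.
    assert!(cache.raw_remove(&"peer_a"));

    cache.raw_insert("peer_b");
    std::thread::sleep(Duration::from_millis(200));
    // The heartbeat-driven sweep returns keys whose TTL has elapsed.
    assert_eq!(cache.remove_expired(), vec!["peer_b"]);
}
```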
@@ -1,6 +1,7 @@
 use super::SlotClock;
 use parking_lot::RwLock;
 use std::convert::TryInto;
+use std::sync::Arc;
 use std::time::Duration;
 use types::Slot;
 
@@ -10,7 +11,7 @@ pub struct ManualSlotClock {
     /// Duration from UNIX epoch to genesis.
     genesis_duration: Duration,
     /// Duration from UNIX epoch to right now.
-    current_time: RwLock<Duration>,
+    current_time: Arc<RwLock<Duration>>,
     /// The length of each slot.
     slot_duration: Duration,
 }
@@ -20,7 +21,7 @@ impl Clone for ManualSlotClock {
         ManualSlotClock {
             genesis_slot: self.genesis_slot,
             genesis_duration: self.genesis_duration,
-            current_time: RwLock::new(*self.current_time.read()),
+            current_time: Arc::clone(&self.current_time),
             slot_duration: self.slot_duration,
         }
     }
@@ -90,7 +91,7 @@ impl SlotClock for ManualSlotClock {
 
         Self {
             genesis_slot,
-            current_time: RwLock::new(genesis_duration),
+            current_time: Arc::new(RwLock::new(genesis_duration)),
            genesis_duration,
            slot_duration,
         }
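
Wrapping `current_time` in an `Arc` changes the semantics of `Clone`: clones now share one time value instead of snapshotting it, so advancing any handle advances them all. A small sketch of the difference, assuming a simplified clock and std's `RwLock` in place of `parking_lot`:

use std::sync::{Arc, RwLock};
use std::time::Duration;

#[derive(Clone)]
struct SharedClock {
    // Clones share this handle, so advancing one advances all.
    current_time: Arc<RwLock<Duration>>,
}

fn main() {
    let a = SharedClock {
        current_time: Arc::new(RwLock::new(Duration::from_secs(0))),
    };
    let b = a.clone();
    *a.current_time.write().unwrap() = Duration::from_secs(12);
    // With the old snapshotting Clone impl, `b` would still read 0s here.
    assert_eq!(*b.current_time.read().unwrap(), Duration::from_secs(12));
}
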
@@ -413,18 +413,18 @@ where
         AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next)
             .map_err(Error::BeaconStateError)?;
 
-        // Default any non-merge execution block hashes to 0x000..000.
-        let execution_status = anchor_block.message_merge().map_or_else(
-            |()| ExecutionStatus::irrelevant(),
-            |message| {
-                let execution_payload = &message.body.execution_payload;
-                if execution_payload == &<_>::default() {
+        let execution_status = anchor_block.message().execution_payload().map_or_else(
+            // If the block doesn't have an execution payload then it can't have
+            // execution enabled.
+            |_| ExecutionStatus::irrelevant(),
+            |execution_payload| {
+                if execution_payload.is_default_with_empty_roots() {
                     // A default payload does not have execution enabled.
                     ExecutionStatus::irrelevant()
                 } else {
                     // Assume that this payload is valid, since the anchor should be a trusted block and
                     // state.
-                    ExecutionStatus::Valid(message.body.execution_payload.block_hash())
+                    ExecutionStatus::Valid(execution_payload.block_hash())
                 }
             },
         );
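
`map_or_else` lets the anchor initialisation collapse the two non-executing cases, no payload at all and a default payload, into `ExecutionStatus::irrelevant()` in a single expression. A toy sketch of that shape (hypothetical types, a `u64` standing in for the block hash):

#[derive(Debug, PartialEq)]
enum ExecutionStatus {
    Irrelevant,
    Valid(u64),
}

// Hypothetical accessor: pre-merge blocks have no payload at all (Err).
fn execution_status(payload: Result<&u64, ()>) -> ExecutionStatus {
    payload.map_or_else(
        // No payload at all: execution cannot be enabled.
        |_| ExecutionStatus::Irrelevant,
        |&hash| {
            if hash == 0 {
                // A default (all-zero) payload also means execution is not enabled.
                ExecutionStatus::Irrelevant
            } else {
                ExecutionStatus::Valid(hash)
            }
        },
    )
}

fn main() {
    assert_eq!(execution_status(Err(())), ExecutionStatus::Irrelevant);
    assert_eq!(execution_status(Ok(&0)), ExecutionStatus::Irrelevant);
    assert_eq!(execution_status(Ok(&42)), ExecutionStatus::Valid(42));
}
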
@@ -857,8 +857,8 @@ where
             (parent_justified, parent_finalized)
         } else {
             let justification_and_finalization_state = match block {
-                // FIXME: verify this is correct for Capella/Eip4844 because
-                // epoch processing changes in Capella..
+                // TODO(eip4844): Ensure that the final specification
+                // does not substantially modify per epoch processing.
                 BeaconBlockRef::Eip4844(_)
                 | BeaconBlockRef::Capella(_)
                 | BeaconBlockRef::Merge(_)
@@ -176,6 +176,15 @@ impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList<T, N> {
     }
 }
 
+impl<T, N: Unsigned> IntoIterator for VariableList<T, N> {
+    type Item = T;
+    type IntoIter = std::vec::IntoIter<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.vec.into_iter()
+    }
+}
+
 impl<T, N: Unsigned> tree_hash::TreeHash for VariableList<T, N>
 where
     T: tree_hash::TreeHash,
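
The owned `IntoIterator` impl means a `VariableList` can now be consumed directly, moving elements out without borrowing or cloning. A sketch of the pattern on an illustrative Vec-backed wrapper:

// Illustrative stand-in for a fixed-capacity list backed by a Vec.
struct List<T> {
    vec: Vec<T>,
}

impl<T> IntoIterator for List<T> {
    type Item = T;
    type IntoIter = std::vec::IntoIter<T>;

    fn into_iter(self) -> Self::IntoIter {
        // Delegate to Vec's owning iterator; elements are moved out.
        self.vec.into_iter()
    }
}

fn main() {
    let list = List { vec: vec![String::from("a"), String::from("b")] };
    // Consumes `list`; each item is an owned String, no clone needed.
    let joined: String = list.into_iter().collect();
    assert_eq!(joined, "ab");
}
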
@@ -180,7 +180,15 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
         )?;
     }
 
+<<<<<<< HEAD
     process_blob_kzg_commitments(block.body(), ctxt)?;
+=======
+    // Eip4844 specifications are not yet released so additional care is taken
+    // to ensure the code does not run in production.
+    if matches!(block, BeaconBlockRef::Eip4844(_)) {
+        process_blob_kzg_commitments(block.body())?;
+    }
+>>>>>>> unstable
 
     Ok(())
 }
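
On the `unstable` side, the 4844-only step is gated on the block variant with `matches!`, so earlier forks never reach it. A stripped-down sketch of that gating (hypothetical enum and function names):

enum BlockRef {
    Merge,
    Capella,
    Eip4844,
}

fn process_blob_kzg_commitments(_block: &BlockRef) -> Result<(), String> {
    Ok(())
}

fn per_block_processing(block: &BlockRef) -> Result<(), String> {
    // Only run the 4844-specific step for 4844 blocks.
    if matches!(block, BlockRef::Eip4844) {
        process_blob_kzg_commitments(block)?;
    }
    Ok(())
}

fn main() {
    assert!(per_block_processing(&BlockRef::Capella).is_ok());
    assert!(per_block_processing(&BlockRef::Eip4844).is_ok());
}
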
@@ -10,8 +10,8 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
     BlockOperationError::invalid(reason)
 }
 
-/// Indicates if a `BlsToExecutionChange` is valid to be included in a block in the current epoch of the given
-/// state.
+/// Indicates if a `BlsToExecutionChange` is valid to be included in a block,
+/// where the block is being applied to the given `state`.
 ///
 /// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity.
 pub fn verify_bls_to_execution_change<T: EthSpec>(
@@ -485,6 +485,52 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockMerge<T, Payload> {
     }
 }
 
+impl<T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockCapella<T, Payload> {
+    /// Return a Capella block where the block has maximum size.
+    pub fn full(spec: &ChainSpec) -> Self {
+        let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec);
+        let bls_to_execution_changes = vec![
+            SignedBlsToExecutionChange {
+                message: BlsToExecutionChange {
+                    validator_index: 0,
+                    from_bls_pubkey: PublicKeyBytes::empty(),
+                    to_execution_address: Address::zero(),
+                },
+                signature: Signature::empty()
+            };
+            T::max_bls_to_execution_changes()
+        ]
+        .into();
+        let sync_aggregate = SyncAggregate {
+            sync_committee_signature: AggregateSignature::empty(),
+            sync_committee_bits: BitVector::default(),
+        };
+        BeaconBlockCapella {
+            slot: spec.genesis_slot,
+            proposer_index: 0,
+            parent_root: Hash256::zero(),
+            state_root: Hash256::zero(),
+            body: BeaconBlockBodyCapella {
+                proposer_slashings: base_block.body.proposer_slashings,
+                attester_slashings: base_block.body.attester_slashings,
+                attestations: base_block.body.attestations,
+                deposits: base_block.body.deposits,
+                voluntary_exits: base_block.body.voluntary_exits,
+                bls_to_execution_changes,
+                sync_aggregate,
+                randao_reveal: Signature::empty(),
+                eth1_data: Eth1Data {
+                    deposit_root: Hash256::zero(),
+                    block_hash: Hash256::zero(),
+                    deposit_count: 0,
+                },
+                graffiti: Graffiti::default(),
+                execution_payload: Payload::Capella::default(),
+            },
+        }
+    }
+}
+
 impl<T: EthSpec, Payload: AbstractExecPayload<T>> EmptyBlock for BeaconBlockCapella<T, Payload> {
     /// Returns an empty Capella block to be used during genesis.
     fn empty(spec: &ChainSpec) -> Self {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn roundtrip_capella_block() {
|
||||||
|
let rng = &mut XorShiftRng::from_seed([42; 16]);
|
||||||
|
let spec = &ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec());
|
||||||
|
|
||||||
|
let inner_block = BeaconBlockCapella {
|
||||||
|
slot: Slot::random_for_test(rng),
|
||||||
|
proposer_index: u64::random_for_test(rng),
|
||||||
|
parent_root: Hash256::random_for_test(rng),
|
||||||
|
state_root: Hash256::random_for_test(rng),
|
||||||
|
body: BeaconBlockBodyCapella::random_for_test(rng),
|
||||||
|
};
|
||||||
|
let block = BeaconBlock::Capella(inner_block.clone());
|
||||||
|
|
||||||
|
test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| {
|
||||||
|
BeaconBlock::from_ssz_bytes(bytes, spec)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn roundtrip_4844_block() {
|
||||||
|
let rng = &mut XorShiftRng::from_seed([42; 16]);
|
||||||
|
let spec = &ForkName::Eip4844.make_genesis_spec(MainnetEthSpec::default_spec());
|
||||||
|
|
||||||
|
let inner_block = BeaconBlockEip4844 {
|
||||||
|
slot: Slot::random_for_test(rng),
|
||||||
|
proposer_index: u64::random_for_test(rng),
|
||||||
|
parent_root: Hash256::random_for_test(rng),
|
||||||
|
state_root: Hash256::random_for_test(rng),
|
||||||
|
body: BeaconBlockBodyEip4844::random_for_test(rng),
|
||||||
|
};
|
||||||
|
let block = BeaconBlock::Eip4844(inner_block.clone());
|
||||||
|
|
||||||
|
test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| {
|
||||||
|
BeaconBlock::from_ssz_bytes(bytes, spec)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn decode_base_and_altair() {
|
fn decode_base_and_altair() {
|
||||||
type E = MainnetEthSpec;
|
type E = MainnetEthSpec;
|
||||||
let spec = E::default_spec();
|
let mut spec = E::default_spec();
|
||||||
|
|
||||||
let rng = &mut XorShiftRng::from_seed([42; 16]);
|
let rng = &mut XorShiftRng::from_seed([42; 16]);
|
||||||
|
|
||||||
let fork_epoch = spec.altair_fork_epoch.unwrap();
|
let altair_fork_epoch = spec.altair_fork_epoch.unwrap();
|
||||||
|
|
||||||
let base_epoch = fork_epoch.saturating_sub(1_u64);
|
let base_epoch = altair_fork_epoch.saturating_sub(1_u64);
|
||||||
let base_slot = base_epoch.end_slot(E::slots_per_epoch());
|
let base_slot = base_epoch.end_slot(E::slots_per_epoch());
|
||||||
let altair_epoch = fork_epoch;
|
let altair_epoch = altair_fork_epoch;
|
||||||
let altair_slot = altair_epoch.start_slot(E::slots_per_epoch());
|
let altair_slot = altair_epoch.start_slot(E::slots_per_epoch());
|
||||||
|
let capella_epoch = altair_fork_epoch + 1;
|
||||||
|
let capella_slot = capella_epoch.start_slot(E::slots_per_epoch());
|
||||||
|
let eip4844_epoch = capella_epoch + 1;
|
||||||
|
let eip4844_slot = eip4844_epoch.start_slot(E::slots_per_epoch());
|
||||||
|
|
||||||
|
spec.altair_fork_epoch = Some(altair_epoch);
|
||||||
|
spec.capella_fork_epoch = Some(capella_epoch);
|
||||||
|
spec.eip4844_fork_epoch = Some(eip4844_epoch);
|
||||||
|
|
||||||
// BeaconBlockBase
|
// BeaconBlockBase
|
||||||
{
|
{
|
||||||
@ -809,5 +901,49 @@ mod tests {
|
|||||||
BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec)
|
BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec)
|
||||||
.expect_err("bad altair block cannot be decoded");
|
.expect_err("bad altair block cannot be decoded");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BeaconBlockCapella
|
||||||
|
{
|
||||||
|
let good_block = BeaconBlock::Capella(BeaconBlockCapella {
|
||||||
|
slot: capella_slot,
|
||||||
|
..<_>::random_for_test(rng)
|
||||||
|
});
|
||||||
|
// It's invalid to have an Capella block with a epoch lower than the fork epoch.
|
||||||
|
let bad_block = {
|
||||||
|
let mut bad = good_block.clone();
|
||||||
|
*bad.slot_mut() = altair_slot;
|
||||||
|
bad
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec)
|
||||||
|
.expect("good capella block can be decoded"),
|
||||||
|
good_block
|
||||||
|
);
|
||||||
|
BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec)
|
||||||
|
.expect_err("bad capella block cannot be decoded");
|
||||||
|
}
|
||||||
|
|
||||||
|
// BeaconBlockEip4844
|
||||||
|
{
|
||||||
|
let good_block = BeaconBlock::Eip4844(BeaconBlockEip4844 {
|
||||||
|
slot: eip4844_slot,
|
||||||
|
..<_>::random_for_test(rng)
|
||||||
|
});
|
||||||
|
// It's invalid to have an Capella block with a epoch lower than the fork epoch.
|
||||||
|
let bad_block = {
|
||||||
|
let mut bad = good_block.clone();
|
||||||
|
*bad.slot_mut() = capella_slot;
|
||||||
|
bad
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec)
|
||||||
|
.expect("good eip4844 block can be decoded"),
|
||||||
|
good_block
|
||||||
|
);
|
||||||
|
BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec)
|
||||||
|
.expect_err("bad eip4844 block cannot be decoded");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -710,7 +710,6 @@ impl<T: EthSpec> BeaconState<T> {
             .ok_or(Error::ShuffleIndexOutOfBounds(index))
     }
 
-    // TODO: check this implementation
     /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`.
     pub fn latest_execution_payload_header(&self) -> Result<ExecutionPayloadHeaderRef<T>, Error> {
         match self {
@@ -1,13 +0,0 @@
-/// Note: this object does not actually exist in the spec.
-///
-/// We use it for managing attestations that have not been aggregated.
-use super::{AttestationData, Signature};
-use serde_derive::Serialize;
-
-#[derive(arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize)]
-pub struct FreeAttestation {
-    pub data: AttestationData,
-    pub signature: Signature,
-    #[serde(with = "eth2_serde_utils::quoted_u64")]
-    pub validator_index: u64,
-}
@@ -47,7 +47,6 @@ pub mod fork;
 pub mod fork_data;
 pub mod fork_name;
 pub mod fork_versioned_response;
-pub mod free_attestation;
 pub mod graffiti;
 pub mod historical_batch;
 pub mod historical_summary;
@@ -154,7 +153,6 @@ pub use crate::fork_name::{ForkName, InconsistentFork};
 pub use crate::fork_versioned_response::{
     ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse,
 };
-pub use crate::free_attestation::FreeAttestation;
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
 pub use crate::indexed_attestation::IndexedAttestation;
@@ -1,7 +1,7 @@
 [package]
 name = "lcli"
 description = "Lighthouse CLI (modeled after zcli)"
-version = "3.4.0"
+version = "3.5.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2021"
 
@@ -1,6 +1,6 @@
 [package]
 name = "lighthouse"
-version = "3.4.0"
+version = "3.5.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 autotests = false
@@ -55,7 +55,7 @@ malloc_utils = { path = "../common/malloc_utils" }
 directory = { path = "../common/directory" }
 unused_port = { path = "../common/unused_port" }
 database_manager = { path = "../database_manager" }
-slasher = { path = "../slasher" }
+slasher = { path = "../slasher", default-features = false }
 
 [dev-dependencies]
 tempfile = "3.1.0"
@@ -1457,7 +1457,7 @@ fn slasher_slot_offset_flag() {
     CommandLineTest::new()
         .flag("slasher", None)
         .flag("slasher-slot-offset", Some("11.25"))
-        .run()
+        .run_with_zero_port()
        .with_config(|config| {
            let slasher_config = config.slasher.as_ref().unwrap();
            assert_eq!(slasher_config.slot_offset, 11.25);
@@ -1469,7 +1469,7 @@ fn slasher_slot_offset_nan_flag() {
     CommandLineTest::new()
         .flag("slasher", None)
         .flag("slasher-slot-offset", Some("NaN"))
-        .run();
+        .run_with_zero_port();
 }
 #[test]
 fn slasher_history_length_flag() {
@@ -1504,7 +1504,7 @@ fn slasher_attestation_cache_size_flag() {
     CommandLineTest::new()
         .flag("slasher", None)
         .flag("slasher-att-cache-size", Some("10000"))
-        .run()
+        .run_with_zero_port()
         .with_config(|config| {
             let slasher_config = config
                 .slasher
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn enable_proposer_re_orgs_default() {
|
fn enable_proposer_re_orgs_default() {
|
||||||
CommandLineTest::new().run().with_config(|config| {
|
CommandLineTest::new()
|
||||||
assert_eq!(
|
.run_with_zero_port()
|
||||||
config.chain.re_org_threshold,
|
.with_config(|config| {
|
||||||
Some(DEFAULT_RE_ORG_THRESHOLD)
|
assert_eq!(
|
||||||
);
|
config.chain.re_org_threshold,
|
||||||
assert_eq!(
|
Some(DEFAULT_RE_ORG_THRESHOLD)
|
||||||
config.chain.re_org_max_epochs_since_finalization,
|
);
|
||||||
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
|
assert_eq!(
|
||||||
);
|
config.chain.re_org_max_epochs_since_finalization,
|
||||||
});
|
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
|
||||||
|
);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn disable_proposer_re_orgs() {
|
fn disable_proposer_re_orgs() {
|
||||||
CommandLineTest::new()
|
CommandLineTest::new()
|
||||||
.flag("disable-proposer-reorgs", None)
|
.flag("disable-proposer-reorgs", None)
|
||||||
.run()
|
.run_with_zero_port()
|
||||||
.with_config(|config| assert_eq!(config.chain.re_org_threshold, None));
|
.with_config(|config| assert_eq!(config.chain.re_org_threshold, None));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1632,7 +1634,7 @@ fn disable_proposer_re_orgs() {
|
|||||||
fn proposer_re_org_threshold() {
|
fn proposer_re_org_threshold() {
|
||||||
CommandLineTest::new()
|
CommandLineTest::new()
|
||||||
.flag("proposer-reorg-threshold", Some("90"))
|
.flag("proposer-reorg-threshold", Some("90"))
|
||||||
.run()
|
.run_with_zero_port()
|
||||||
.with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90));
|
.with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1640,7 +1642,7 @@ fn proposer_re_org_threshold() {
|
|||||||
fn proposer_re_org_max_epochs_since_finalization() {
|
fn proposer_re_org_max_epochs_since_finalization() {
|
||||||
CommandLineTest::new()
|
CommandLineTest::new()
|
||||||
.flag("proposer-reorg-epochs-since-finalization", Some("8"))
|
.flag("proposer-reorg-epochs-since-finalization", Some("8"))
|
||||||
.run()
|
.run_with_zero_port()
|
||||||
.with_config(|config| {
|
.with_config(|config| {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
config.chain.re_org_max_epochs_since_finalization.as_u64(),
|
config.chain.re_org_max_epochs_since_finalization.as_u64(),
|
||||||
|
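
`run_with_zero_port` exists so tests running in parallel never contend for a fixed port: binding port 0 asks the OS for any free ephemeral port. The underlying trick with std:

use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Binding port 0 asks the kernel for any free ephemeral port.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let port = listener.local_addr()?.port();
    println!("OS assigned port {port}");
    assert_ne!(port, 0);
    Ok(())
}
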
@@ -55,7 +55,7 @@ SECONDS_PER_SLOT=3
 SECONDS_PER_ETH1_BLOCK=1
 
 # Proposer score boost percentage
-PROPOSER_SCORE_BOOST=70
+PROPOSER_SCORE_BOOST=40
 
 # Command line arguments for validator client
 VC_ARGS=""
@@ -2,6 +2,7 @@
 
 # Requires `lighthouse`, `lcli`, `ganache`, `curl`, `jq`
 
+
 BEHAVIOR=$1
 
 if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then
@@ -9,13 +10,22 @@ if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then
     exit 1
 fi
 
+exit_if_fails() {
+    echo $@
+    $@
+    EXIT_CODE=$?
+    if [[ $EXIT_CODE -eq 1 ]]; then
+        exit 111
+    fi
+}
+
 source ./vars.env
 
-../local_testnet/clean.sh
+exit_if_fails ../local_testnet/clean.sh
 
 echo "Starting ganache"
 
-../local_testnet/ganache_test_node.sh &> /dev/null &
+exit_if_fails ../local_testnet/ganache_test_node.sh &> /dev/null &
 GANACHE_PID=$!
 
 # Wait for ganache to start
@@ -23,14 +33,14 @@ sleep 5
 
 echo "Setting up local testnet"
 
-../local_testnet/setup.sh
+exit_if_fails ../local_testnet/setup.sh
 
 # Duplicate this directory so slashing protection doesn't keep us from re-using validator keys
-cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger
+exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger
 
 echo "Starting bootnode"
 
-../local_testnet/bootnode.sh &> /dev/null &
+exit_if_fails ../local_testnet/bootnode.sh &> /dev/null &
 BOOT_PID=$!
 
 # wait for the bootnode to start
@@ -38,20 +48,20 @@ sleep 10
 
 echo "Starting local beacon nodes"
 
-../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null &
+exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null &
 BEACON_PID=$!
-../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null &
+exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null &
 BEACON_PID2=$!
-../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null &
+exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null &
 BEACON_PID3=$!
 
 echo "Starting local validator clients"
 
-../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null &
+exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null &
 VALIDATOR_1_PID=$!
-../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null &
+exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null &
 VALIDATOR_2_PID=$!
-../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null &
+exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null &
 VALIDATOR_3_PID=$!
 
 echo "Waiting an epoch before starting the next validator client"
@@ -73,9 +83,14 @@ if [[ "$BEHAVIOR" == "failure" ]]; then
 
     echo "Done"
 
-    if [[ $DOPPELGANGER_EXIT -eq 124 ]]; then
+    # We expect to find a doppelganger, exit with success error code if doppelganger was found
+    # and failure if no doppelganger was found.
+    if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then
+        exit 0
+    else
         exit 1
     fi
 
 fi
 
 if [[ "$BEHAVIOR" == "success" ]]; then
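
`exit_if_fails` echoes each command and converts a failing exit status into the sentinel code 111, so a setup failure cannot be confused with the test's own pass/fail exit codes. The same propagate-with-a-sentinel idea sketched in Rust (illustrative only; the script itself is bash):

use std::process::{exit, Command};

// Run a command; on failure exit with a sentinel code distinct from the
// codes the test itself uses to report its verdict.
fn exit_if_fails(program: &str, args: &[&str]) {
    println!("{program} {}", args.join(" "));
    let status = Command::new(program)
        .args(args)
        .status()
        .expect("command should spawn");
    if !status.success() {
        exit(111);
    }
}

fn main() {
    // `true` always succeeds on Unix, so this returns normally.
    exit_if_fails("true", &[]);
}
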
@@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3
 # Seconds per Eth1 block
 SECONDS_PER_ETH1_BLOCK=1
 
+# Proposer score boost percentage
+PROPOSER_SCORE_BOOST=40
+
 # Enable doppelganger detection
 VC_ARGS=" --enable-doppelganger-protection "
|
|||||||
directory = { path = "../../common/directory" }
|
directory = { path = "../../common/directory" }
|
||||||
lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
|
lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
|
||||||
network = { path = "../../beacon_node/network" }
|
network = { path = "../../beacon_node/network" }
|
||||||
slasher = { path = ".." }
|
slasher = { path = "..", default-features = false }
|
||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
slot_clock = { path = "../../common/slot_clock" }
|
slot_clock = { path = "../../common/slot_clock" }
|
||||||
state_processing = { path = "../../consensus/state_processing" }
|
state_processing = { path = "../../consensus/state_processing" }
|
||||||
|
@@ -1,4 +1,4 @@
-TESTS_TAG := v1.3.0-rc.1
+TESTS_TAG := v1.3.0-rc.3
 TESTS = general minimal mainnet
 TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))
 
@@ -53,9 +53,11 @@ excluded_paths = [
     "bls12-381-tests/hash_to_G2"
 ]
 
+
 def normalize_path(path):
     return path.split("consensus-spec-tests/")[1]
 
+
 # Determine the list of filenames which were accessed during tests.
 passed = set()
 for line in open(accessed_files_filename, 'r').readlines():
@@ -88,4 +90,5 @@ for root, dirs, files in os.walk(tests_dir_filename):
 # Exit with an error if there were any files missed.
 assert len(missed) == 0, "{} missed files".format(len(missed))
 
-print("Accessed {} files ({} intentionally excluded)".format(accessed_files, excluded_files))
+print("Accessed {} files ({} intentionally excluded)".format(
+    accessed_files, excluded_files))
@@ -653,6 +653,11 @@ impl<E: EthSpec + TypeName> Handler for MerkleProofValidityHandler<E> {
 
     fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool {
         fork_name != ForkName::Base
+            // Test is skipped due to some changes in the Capella light client
+            // spec.
+            //
+            // https://github.com/sigp/lighthouse/issues/4022
+            && fork_name != ForkName::Capella
     }
 }
 
@@ -21,6 +21,4 @@ deposit_contract = { path = "../../common/deposit_contract" }
 reqwest = { version = "0.11.0", features = ["json"] }
 hex = "0.4.2"
 fork_choice = { path = "../../consensus/fork_choice" }
-
-[features]
-default = []
+logging = { path = "../../common/logging" }
|
|||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use unused_port::unused_tcp_port;
|
use unused_port::unused_tcp_port;
|
||||||
|
|
||||||
const GETH_BRANCH: &str = "master";
|
// const GETH_BRANCH: &str = "master";
|
||||||
const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum";
|
const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum";
|
||||||
|
|
||||||
pub fn build_result(repo_dir: &Path) -> Output {
|
pub fn build_result(repo_dir: &Path) -> Output {
|
||||||
@ -27,7 +27,9 @@ pub fn build(execution_clients_dir: &Path) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the latest tag on the branch
|
// Get the latest tag on the branch
|
||||||
let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap();
|
// TODO: Update when version is corrected
|
||||||
|
// let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap();
|
||||||
|
let last_release = "v1.11.1";
|
||||||
build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap();
|
build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap();
|
||||||
|
|
||||||
// Build geth
|
// Build geth
|
||||||
|
@@ -100,7 +100,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password:
 
 impl<E: GenericExecutionEngine> TestRig<E> {
     pub fn new(generic_engine: E) -> Self {
-        let log = environment::null_logger().unwrap();
+        let log = logging::test_logger();
         let runtime = Arc::new(
             tokio::runtime::Builder::new_multi_thread()
                 .enable_all()
@@ -270,6 +270,8 @@ impl<E: GenericExecutionEngine> TestRig<E> {
         };
         let proposer_index = 0;
 
+        // To save sending proposer preparation data, just set the fee recipient
+        // to the fee recipient configured for EE A.
         let prepared = self
             .ee_a
             .execution_layer
@@ -278,7 +280,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
                 head_root,
                 proposer_index,
                 // TODO: think about how to test different forks
-                PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None),
+                PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None),
             )
             .await;
 
@@ -334,6 +336,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
             .unwrap()
             .to_payload()
             .execution_payload();
+        assert_eq!(valid_payload.transactions().len(), pending_txs.len());
 
         /*
          * Execution Engine A:
@@ -398,7 +401,6 @@ impl<E: GenericExecutionEngine> TestRig<E> {
             .await
             .unwrap();
         assert_eq!(status, PayloadStatus::Valid);
-        assert_eq!(valid_payload.transactions().len(), pending_txs.len());
 
         // Verify that all submitted txs were successful
         for pending_tx in pending_txs {
@@ -489,8 +491,10 @@ impl<E: GenericExecutionEngine> TestRig<E> {
         let head_block_hash = valid_payload.block_hash();
         let finalized_block_hash = ExecutionBlockHash::zero();
         // TODO: think about how to handle different forks
+        // To save sending proposer preparation data, just set the fee recipient
+        // to the fee recipient configured for EE A.
         let payload_attributes =
-            PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None);
+            PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None);
         let slot = Slot::new(42);
         let head_block_root = Hash256::repeat_byte(100);
         let validator_index = 0;
@@ -121,7 +121,13 @@ pub fn serve<T: EthSpec>(
         .and_then(|ctx: Arc<Context<T>>| async move {
             Ok::<_, warp::Rejection>(
                 metrics::gather_prometheus_metrics(&ctx)
-                    .map(|body| Response::builder().status(200).body(body).unwrap())
+                    .map(|body| {
+                        Response::builder()
+                            .status(200)
+                            .header("Content-Type", "text/plain")
+                            .body(body)
+                            .unwrap()
+                    })
                     .unwrap_or_else(|e| {
                         Response::builder()
                             .status(500)
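
The expanded builder sets an explicit `Content-Type: text/plain` on the 200 response so Prometheus scrapers do not have to sniff the body. A standalone sketch with the `http` crate's builder, which is the same builder shape warp exposes (the exact import in the real code is an assumption):

use http::Response;

fn metrics_response(body: String) -> Response<String> {
    // Explicit Content-Type so scrapers need not guess the encoding.
    Response::builder()
        .status(200)
        .header("Content-Type", "text/plain")
        .body(body)
        .expect("static parts of the response are valid")
}

fn main() {
    let resp = metrics_response("lighthouse_metric 1\n".to_string());
    assert_eq!(resp.status(), 200);
    assert_eq!(resp.headers()["content-type"], "text/plain");
}
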