diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index eb927338b..3f0bd6f9b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -65,7 +65,7 @@ jobs: x86_64-portable] features: [ {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, - {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"} + {version_suffix: "-dev", env: "jemalloc,spec-minimal"} ] include: - profile: maxperf @@ -77,8 +77,6 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} - FEATURES: ${{ matrix.features.env }} - CROSS_FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -118,7 +116,6 @@ jobs: --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ - --build-arg FEATURES=${FEATURES} \ --provenance=false \ --push build-docker-multiarch: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 57fee7183..445f71fa0 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -10,7 +10,8 @@ on: pull_request: env: # Deny warnings in CI - RUSTFLAGS: "-D warnings" + # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) + RUSTFLAGS: "-D warnings -C debuginfo=0" # The Nightly version used for cargo-udeps, might need updating from time to time. PINNED_NIGHTLY: nightly-2022-12-15 # Prevent Github API rate limiting. 
@@ -280,7 +281,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.20' - uses: actions/setup-dotnet@v3 with: dotnet-version: '6.0.201' diff --git a/.gitignore b/.gitignore index 7e370ee35..e29a92131 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ genesis.ssz # IntelliJ /*.iml # VSCode -/.vscode \ No newline at end of file +/.vscode +.idea diff --git a/Cargo.lock b/Cargo.lock index 3cf33800f..4b0da7bb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.5", + "itoa", "matchit", "memchr", "mime", @@ -621,7 +621,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.4.0" +version = "3.5.0" dependencies = [ "beacon_chain", "clap", @@ -790,7 +790,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.4.0" +version = "3.5.0" dependencies = [ "beacon_node", "clap", @@ -820,18 +820,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -1372,13 +1360,12 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.6" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -1449,9 +1436,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.89" +version = "1.0.90" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" +checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" dependencies = [ "cc", "cxxbridge-flags", @@ -1461,9 +1448,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" +checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" dependencies = [ "cc", "codespan-reporting", @@ -1476,15 +1463,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" +checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" [[package]] name = "cxxbridge-macro" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" +checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" dependencies = [ "proc-macro2", "quote", @@ -2496,6 +2483,7 @@ dependencies = [ "fork_choice", "futures", "hex", + "logging", "reqwest", "sensitive_url", "serde_json", @@ -2579,9 +2567,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -3003,7 +2991,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tracing", ] @@ -3212,7 
+3200,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.5", + "itoa", ] [[package]] @@ -3242,6 +3230,7 @@ dependencies = [ "environment", "eth1", "eth2", + "eth2_serde_utils", "eth2_ssz", "execution_layer", "futures", @@ -3331,7 +3320,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -3495,7 +3484,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", ] [[package]] @@ -3622,12 +3611,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.5" @@ -3769,7 +3752,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.4.0" +version = "3.5.0" dependencies = [ "account_utils", "beacon_chain", @@ -4266,7 +4249,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "webrtc", ] @@ -4375,7 +4358,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.4.0" +version = "3.5.0" dependencies = [ "account_manager", "account_utils", @@ -4442,6 +4425,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", + "lru_cache", "parking_lot 0.12.1", "prometheus-client", "quickcheck", @@ -4784,14 +4768,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = 
"5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -5277,9 +5261,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oneshot_broadcast" @@ -5458,9 +5442,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" +checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -5900,7 +5884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.5", + "itoa", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -6337,7 +6321,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tower-service", "url", "wasm-bindgen", @@ -6622,7 +6606,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", "scale-info-derive", ] @@ -6868,11 +6852,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ - "itoa 
1.0.5", + "itoa", "ryu", "serde", ] @@ -6895,7 +6879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -7032,9 +7016,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -7733,10 +7717,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -7766,7 +7751,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 1.0.5", + "itoa", "libc", "num_threads", "serde", @@ -7935,7 +7920,7 @@ dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", ] [[package]] @@ -7985,9 +7970,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -9033,9 +9018,9 @@ dependencies = [ [[package]] name = "webrtc-ice" -version = "0.9.0" +version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" dependencies = [ "arc-swap", "async-trait", diff --git a/Makefile b/Makefile index 8fd502ed2..bf2ad6794 100644 --- a/Makefile +++ b/Makefile @@ -38,15 +38,24 @@ PROFILE ?= release # they run for different forks. FORKS=phase0 altair merge capella eip4844 +# Extra flags for Cargo +CARGO_INSTALL_EXTRA_FLAGS?= + # Builds the Lighthouse binary in release (optimized). # # Binaries will most likely be found in `./target/release` install: - cargo install --path lighthouse --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lighthouse --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # Builds the lcli binary in release (optimized). install-lcli: - cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)" + cargo install --path lcli --force --locked \ + --features "$(FEATURES)" \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) # The following commands use `cross` to build a cross-compile. # @@ -124,7 +133,7 @@ run-ef-tests: test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain + env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. 
test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index c7962e79a..63f52b22d 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.4.0" +version = "3.5.0" authors = ["Paul Hauner ", "Age Manning write!( - f, - "The execution endpoint is connected and configured, \ - however it is not yet synced" - ), CapellaReadiness::NoExecutionEndpoint => write!( f, "The --execution-endpoint flag is not specified, this is a \ @@ -56,8 +49,7 @@ impl fmt::Display for CapellaReadiness { ), CapellaReadiness::V2MethodsNotSupported { error } => write!( f, - "The execution endpoint does not appear to support \ - the required engine api methods for Capella: {}", + "Execution endpoint does not support Capella methods: {}", error ), } @@ -115,12 +107,7 @@ impl BeaconChain { } if all_good { - if !el.is_synced_for_notifier().await { - // The EL is not synced. - CapellaReadiness::NotSynced - } else { - CapellaReadiness::Ready - } + CapellaReadiness::Ready } else { CapellaReadiness::V2MethodsNotSupported { error: missing_methods, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs index 02422a403..be913d8cc 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -2,9 +2,41 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; use operation_pool::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, }; -use slog::{debug, info, Logger}; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; use std::sync::Arc; +use std::time::Duration; use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::{EthSpec, Hash256, Slot}; + +/// The slot clock isn't usually available before the database is initialized, so we 
construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? { + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let genesis_state = + if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { + state + } else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} pub fn upgrade_to_v14( db: Arc>, @@ -41,17 +73,35 @@ pub fn downgrade_from_v14( db: Arc>, log: Logger, ) -> Result, Error> { + // We cannot downgrade from V14 once the Capella fork has been reached because there will + // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions + // of Lighthouse can't handle that. + if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { + let current_epoch = get_slot_clock::(&db, &log)? + .and_then(|clock| clock.now()) + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + .ok_or(Error::SlotClockUnavailableForMigration)?; + + if current_epoch >= capella_fork_epoch { + error!( + log, + "Capella already active: v14+ is mandatory"; + "current_epoch" => current_epoch, + "capella_fork_epoch" => capella_fork_epoch, + ); + return Err(Error::UnableToDowngrade); + } + } + // Load a V14 op pool and transform it to V12. 
- let PersistedOperationPoolV14 { + let PersistedOperationPoolV14:: { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, - } = if let Some(PersistedOperationPool::::V14(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { op_pool } else { debug!(log, "Nothing to do, no operation pool stored"); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs index f4adc2cf4..07c86bd93 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -43,7 +43,7 @@ pub fn downgrade_from_v15( log: Logger, ) -> Result, Error> { // Load a V15 op pool and transform it to V14. - let PersistedOperationPoolV15 { + let PersistedOperationPoolV15:: { attestations, sync_contributions, attester_slashings, @@ -51,9 +51,7 @@ pub fn downgrade_from_v15( voluntary_exits, bls_to_execution_changes, capella_bls_change_broadcast_indices, - } = if let Some(PersistedOperationPool::::V15(op_pool)) = - db.get_item(&OP_POOL_DB_KEY)? - { + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { op_pool } else { debug!(log, "Nothing to do, no operation pool stored"); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index dad5e1517..de2681012 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -29,7 +29,7 @@ const TOTAL_LABEL: &str = "total"; /// The validator monitor collects per-epoch data about each monitored validator. Historical data /// will be kept around for `HISTORIC_EPOCHS` before it is pruned. 
-pub const HISTORIC_EPOCHS: usize = 4; +pub const HISTORIC_EPOCHS: usize = 10; /// Once the validator monitor reaches this number of validators it will stop /// tracking their metrics/logging individually in an effort to reduce @@ -45,7 +45,7 @@ pub enum Error { /// Contains data pertaining to one validator for one epoch. #[derive(Default)] -struct EpochSummary { +pub struct EpochSummary { /* * Attestations with a target in the current epoch. */ @@ -103,6 +103,12 @@ struct EpochSummary { pub proposer_slashings: usize, /// The number of attester slashings observed. pub attester_slashings: usize, + + /* + * Other validator info helpful for the UI. + */ + /// The total balance of the validator. + pub total_balance: Option, } impl EpochSummary { @@ -176,18 +182,60 @@ impl EpochSummary { pub fn register_attester_slashing(&mut self) { self.attester_slashings += 1; } + + pub fn register_validator_total_balance(&mut self, total_balance: u64) { + self.total_balance = Some(total_balance) + } } type SummaryMap = HashMap; +#[derive(Default)] +pub struct ValidatorMetrics { + pub attestation_hits: u64, + pub attestation_misses: u64, + pub attestation_head_hits: u64, + pub attestation_head_misses: u64, + pub attestation_target_hits: u64, + pub attestation_target_misses: u64, +} + +impl ValidatorMetrics { + pub fn increment_hits(&mut self) { + self.attestation_hits += 1; + } + + pub fn increment_misses(&mut self) { + self.attestation_misses += 1; + } + + pub fn increment_target_hits(&mut self) { + self.attestation_target_hits += 1; + } + + pub fn increment_target_misses(&mut self) { + self.attestation_target_misses += 1; + } + + pub fn increment_head_hits(&mut self) { + self.attestation_head_hits += 1; + } + + pub fn increment_head_misses(&mut self) { + self.attestation_head_misses += 1; + } +} + /// A validator that is being monitored by the `ValidatorMonitor`. -struct MonitoredValidator { +pub struct MonitoredValidator { /// A human-readable identifier for the validator. 
pub id: String, /// The validator index in the state. pub index: Option, /// A history of the validator over time. pub summaries: RwLock, + /// Validator metrics to be exposed over the HTTP API. + pub metrics: RwLock, } impl MonitoredValidator { @@ -198,6 +246,7 @@ impl MonitoredValidator { .unwrap_or_else(|| pubkey.to_string()), index, summaries: <_>::default(), + metrics: <_>::default(), } } @@ -252,6 +301,20 @@ impl MonitoredValidator { fn touch_epoch_summary(&self, epoch: Epoch) { self.with_epoch_summary(epoch, |_| {}); } + + fn get_from_epoch_summary(&self, epoch: Epoch, func: F) -> Option + where + F: Fn(Option<&EpochSummary>) -> Option, + { + let summaries = self.summaries.read(); + func(summaries.get(&epoch)) + } + + pub fn get_total_balance(&self, epoch: Epoch) -> Option { + self.get_from_epoch_summary(epoch, |summary_opt| { + summary_opt.and_then(|summary| summary.total_balance) + }) + } } /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P @@ -347,12 +410,20 @@ impl ValidatorMonitor { if let Some(i) = monitored_validator.index { monitored_validator.touch_epoch_summary(current_epoch); + let i = i as usize; + + // Cache relevant validator info. + if let Some(balance) = state.balances().get(i) { + monitored_validator.with_epoch_summary(current_epoch, |summary| { + summary.register_validator_total_balance(*balance) + }); + } + // Only log the per-validator metrics if it's enabled. if !self.individual_tracking() { continue; } - let i = i as usize; let id = &monitored_validator.id; if let Some(balance) = state.balances().get(i) { @@ -479,6 +550,25 @@ impl ValidatorMonitor { continue; } + // Store some metrics directly to be re-exposed on the HTTP API. 
+ let mut validator_metrics = monitored_validator.metrics.write(); + if previous_epoch_matched_any { + validator_metrics.increment_hits(); + if previous_epoch_matched_target { + validator_metrics.increment_target_hits() + } else { + validator_metrics.increment_target_misses() + } + if previous_epoch_matched_head { + validator_metrics.increment_head_hits() + } else { + validator_metrics.increment_head_misses() + } + } else { + validator_metrics.increment_misses() + } + drop(validator_metrics); + // Indicates if any attestation made it on-chain. // // For Base states, this will be *any* attestation whatsoever. For Altair states, @@ -717,6 +807,14 @@ impl ValidatorMonitor { self.validators.values().map(|val| val.id.clone()).collect() } + pub fn get_monitored_validator(&self, index: u64) -> Option<&MonitoredValidator> { + if let Some(pubkey) = self.indices.get(&index) { + self.validators.get(pubkey) + } else { + None + } + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. 
pub fn auto_register_local_validator(&mut self, validator_index: u64) { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6718791d7..33cb58fa7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,6 +2,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; @@ -24,6 +25,7 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, HotColdDB, LevelDB, StoreConfig, @@ -78,6 +80,7 @@ fn get_harness( let harness = TestHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() .build(); @@ -2543,6 +2546,91 @@ async fn revert_minority_fork_on_resume() { assert_eq!(heads.len(), 1); } +// This test checks whether the schema downgrade from the latest version to some minimum supported +// version is correct. This is the easiest schema test to write without historic versions of +// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually +// as old downgrades are deprecated. 
+#[tokio::test] +async fn schema_downgrade_to_min_version() { + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec.clone(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let min_version = if harness.spec.capella_fork_epoch.is_some() { + // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that + // at all if Capella is enabled. + SchemaVersion(14) + } else { + SchemaVersion(11) + }; + + // Close the database to ensure everything is written to disk. + drop(store); + drop(harness); + + // Re-open the store. + let store = get_store(&db_path); + + // Downgrade. + let deposit_contract_deploy_block = 0; + migrate_schema::<DiskHarnessType<E>>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version, + store.logger().clone(), + spec, + ) + .expect("schema downgrade to minimum version should work"); + + // Upgrade back. + migrate_schema::<DiskHarnessType<E>>( + store.clone(), + deposit_contract_deploy_block, + min_version, + CURRENT_SCHEMA_VERSION, + store.logger().clone(), + spec, + ) + .expect("schema upgrade from minimum version should work"); + + // Recreate the harness. + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) + .logger(store.logger().clone()) + .resumed_disk_store(store.clone()) + .mock_execution_layer() + .build(); + + check_finalization(&harness, num_blocks_produced); + check_split_slot(&harness, store.clone()); + check_chain_dump(&harness, num_blocks_produced + 1); + check_iterators(&harness); + + // Check that downgrading beyond the minimum version fails (bound is *tight*). 
+ let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap()); + migrate_schema::>( + store.clone(), + deposit_contract_deploy_block, + CURRENT_SCHEMA_VERSION, + min_version_sub_1, + harness.logger().clone(), + spec, + ) + .expect_err("should not downgrade below minimum version"); +} + /// Checks that two chains are the same, for the purpose of these tests. /// /// Several fields that are hard/impossible to check are ignored (e.g., the store). diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9a49843a9..876458eea 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -39,7 +39,7 @@ time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } -slasher = { path = "../../slasher" } +slasher = { path = "../../slasher", default-features = false } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index c1d830bc0..fb8a9b634 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -466,13 +466,14 @@ async fn capella_readiness_logging( error!( log, "Not ready for Capella"; + "hint" => "the execution endpoint may be offline", "info" => %readiness, - "hint" => "try updating Lighthouse and/or the execution layer", ) } readiness => warn!( log, "Not ready for Capella"; + "hint" => "try updating the execution endpoint", "info" => %readiness, ), } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index e66630497..1939007b6 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde_json::json; use std::collections::HashSet; use 
tokio::sync::Mutex; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use types::EthSpec; pub use deposit_log::{DepositLog, Log}; @@ -566,31 +566,23 @@ pub mod deposit_methods { #[derive(Clone, Debug)] pub struct CapabilitiesCacheEntry { engine_capabilities: EngineCapabilities, - fetch_time: SystemTime, + fetch_time: Instant, } impl CapabilitiesCacheEntry { pub fn new(engine_capabilities: EngineCapabilities) -> Self { Self { engine_capabilities, - fetch_time: SystemTime::now(), + fetch_time: Instant::now(), } } - pub fn engine_capabilities(&self) -> &EngineCapabilities { - &self.engine_capabilities + pub fn engine_capabilities(&self) -> EngineCapabilities { + self.engine_capabilities } pub fn age(&self) -> Duration { - // duration_since() may fail because measurements taken earlier - // are not guaranteed to always be before later measurements - // due to anomalies such as the system clock being adjusted - // either forwards or backwards - // - // In such cases, we'll just say the age is zero - SystemTime::now() - .duration_since(self.fetch_time) - .unwrap_or(Duration::ZERO) + Instant::now().duration_since(self.fetch_time) } /// returns `true` if the entry's age is >= age_limit @@ -841,7 +833,9 @@ impl HttpJsonRpc { Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { execution_payload: payload_v1.into(), - // Have to guess zero here as we don't know the value + // Set the V1 payload values from the EE to be zero. This simulates + // the pre-block-value functionality of always choosing the builder + // block. 
block_value: Uint256::zero(), })) } @@ -1055,16 +1049,12 @@ impl HttpJsonRpc { ) -> Result { let mut lock = self.engine_capabilities_cache.lock().await; - if lock - .as_ref() - .map_or(true, |entry| entry.older_than(age_limit)) - { + if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { + Ok(lock.engine_capabilities()) + } else { let engine_capabilities = self.exchange_capabilities().await?; *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); Ok(engine_capabilities) - } else { - // here entry is guaranteed to exist so unwrap() is safe - Ok(*lock.as_ref().unwrap().engine_capabilities()) } } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 3a5f98779..ed9a58f26 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -144,7 +144,6 @@ impl From> for JsonExecutionPayloadV2 withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -172,7 +171,6 @@ impl From> for JsonExecutionPayloadV3 withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -230,7 +228,6 @@ impl From> for ExecutionPayloadCapella withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), @@ -258,7 +255,6 @@ impl From> for ExecutionPayloadEip4844 withdrawals: payload .withdrawals .into_iter() - .cloned() .map(Into::into) .collect::>() .into(), diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index fe4058af0..ce413cb11 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -17,8 +17,7 @@ use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. 
/// -/// Since the size of each value is small (~100 bytes) a large number is used for safety. -/// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals +/// Since the size of each value is small (~800 bytes) a large number is used for safety. const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes @@ -276,7 +275,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - (**state, CapabilitiesCacheAction::None) + (**state, CapabilitiesCacheAction::Clear) } Err(e) => { error!( @@ -342,7 +341,7 @@ impl Engine { /// deadlock. pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: FnOnce(&'a Engine) -> G, G: Future>, { match func(self).await { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f07fa7932..86fe89d7b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1345,16 +1345,11 @@ impl ExecutionLayer { .set_latest_forkchoice_state(forkchoice_state) .await; - let payload_attributes_ref = &payload_attributes; let result = self .engine() .request(|engine| async move { engine - .notify_forkchoice_updated( - forkchoice_state, - payload_attributes_ref.clone(), - self.log(), - ) + .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) .await }) .await; @@ -1720,7 +1715,7 @@ impl ExecutionLayer { capella_block .withdrawals .into_iter() - .map(|w| w.into()) + .map(Into::into) .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; @@ -1747,7 +1742,7 @@ impl ExecutionLayer { eip4844_block .withdrawals .into_iter() - .map(|w| w.into()) + .map(Into::into) .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 5110a73ed..0aa626be0 
100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,6 +36,7 @@ tree_hash = "0.4.1" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +eth2_serde_utils = "0.1.1" operation_pool = { path = "../operation_pool" } [dev-dependencies] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ec3d01085..5b5f0ff3e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2374,11 +2374,19 @@ pub fn serve( .and(not_while_syncing_filter.clone()) .and(warp::query::()) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| async move { + chain: Arc>, + log: Logger| async move { + debug!( + log, + "Block production request from HTTP API"; + "slot" => slot + ); + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { warp_utils::reject::custom_bad_request(format!( "randao reveal is not a valid BLS signature: {:?}", @@ -3125,6 +3133,22 @@ pub fn serve( }, ); + // POST lighthouse/ui/validator_info + let post_lighthouse_ui_validator_info = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_info")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: ui::ValidatorInfoRequestData, chain: Arc>| { + blocking_json_task(move || { + ui::get_validator_info(request_data, chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3660,6 +3684,7 @@ pub fn serve( .or(post_lighthouse_database_historical_blocks.boxed()) .or(post_lighthouse_block_rewards.boxed()) .or(post_lighthouse_ui_validator_metrics.boxed()) + .or(post_lighthouse_ui_validator_info.boxed()) .recover(warp_utils::reject::handle_rejection), )) 
.recover(warp_utils::reject::handle_rejection) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index e41a90c95..346e802ca 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -5,7 +5,7 @@ use beacon_chain::NotifyExecutionLayer; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{error, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; @@ -29,6 +29,11 @@ pub async fn publish_block( //FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message. //this may skew metrics let block_root = block_root.unwrap_or_else(|| block.canonical_root()); + debug!( + log, + "Signed block published to HTTP API"; + "slot" => block.slot() + ); // Send the block, regardless of whether or not it is valid. The API // specification is very clear that this is the desired behaviour. 
diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index a5b3a8b2f..e8280a796 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,5 +1,7 @@ -use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; -use eth2::types::ValidatorStatus; +use beacon_chain::{ + validator_monitor::HISTORIC_EPOCHS, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -71,6 +73,82 @@ pub fn get_validator_count( }) } +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoRequestData { + #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + indices: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoValues { + #[serde(with = "eth2_serde_utils::quoted_u64")] + epoch: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + total_balance: u64, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfo { + info: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorInfoResponse { + validators: HashMap, +} + +pub fn get_validator_info( + request_data: ValidatorInfoRequestData, + chain: Arc>, +) -> Result { + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + + let epochs = current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64(); + + let validator_ids = chain + .validator_monitor + .read() + .get_all_monitored_validators() + .iter() + .cloned() + .collect::>(); + + let indices = request_data + .indices + .iter() + .map(|index| index.to_string()) + .collect::>(); + + let ids = validator_ids + .intersection(&indices) + .collect::>(); + + let mut validators = HashMap::new(); + + for id in ids { + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + .validator_monitor + .read() + .get_monitored_validator(index) + { + let mut info = 
vec![]; + for epoch in epochs.clone() { + if let Some(total_balance) = validator.get_total_balance(Epoch::new(epoch)) { + info.push(ValidatorInfoValues { + epoch, + total_balance, + }); + } + } + validators.insert(id.clone(), ValidatorInfo { info }); + } + } + } + + Ok(ValidatorInfoResponse { validators }) +} + #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorMetricsRequestData { indices: Vec, @@ -119,76 +197,56 @@ pub fn post_validator_monitor_metrics( let mut validators = HashMap::new(); for id in ids { - let attestation_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestations = attestation_hits + attestation_misses; - let attestation_hit_percentage: f64 = if attestations == 0 { - 0.0 - } else { - (100 * attestation_hits / attestations) as f64 - }; + if let Ok(index) = id.parse::() { + if let Some(validator) = chain + .validator_monitor + .read() + .get_monitored_validator(index) + { + let val_metrics = validator.metrics.read(); + let attestation_hits = val_metrics.attestation_hits; + let attestation_misses = val_metrics.attestation_misses; + let attestation_head_hits = val_metrics.attestation_head_hits; + let attestation_head_misses = val_metrics.attestation_head_misses; + let attestation_target_hits = val_metrics.attestation_target_hits; + let attestation_target_misses = val_metrics.attestation_target_misses; + drop(val_metrics); - let attestation_head_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_head_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, - &[id], - 
) - .map(|counter| counter.get()) - .unwrap_or(0); - let head_attestations = attestation_head_hits + attestation_head_misses; - let attestation_head_hit_percentage: f64 = if head_attestations == 0 { - 0.0 - } else { - (100 * attestation_head_hits / head_attestations) as f64 - }; + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + let head_attestations = attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; - let attestation_target_hits = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let attestation_target_misses = metrics::get_int_counter( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, - &[id], - ) - .map(|counter| counter.get()) - .unwrap_or(0); - let target_attestations = attestation_target_hits + attestation_target_misses; - let attestation_target_hit_percentage: f64 = if target_attestations == 0 { - 0.0 - } else { - (100 * attestation_target_hits / target_attestations) as f64 - }; + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; - let metrics = ValidatorMetrics { - attestation_hits, - attestation_misses, - attestation_hit_percentage, - attestation_head_hits, - attestation_head_misses, - attestation_head_hit_percentage, - attestation_target_hits, - attestation_target_misses, - attestation_target_hit_percentage, - }; + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, + attestation_head_hits, + 
attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + }; - validators.insert(id.clone(), metrics); + validators.insert(id.clone(), metrics); + } + } } Ok(ValidatorMetricsResponse { validators }) diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index dfdb8f7ff..2895506c3 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -116,7 +116,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500) diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index b3e02d4cb..89fde3237 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -1,6 +1,7 @@ use beacon_chain::test_utils::EphemeralHarnessType; use environment::null_logger; use http_metrics::Config; +use reqwest::header::HeaderValue; use reqwest::StatusCode; use std::net::{IpAddr, Ipv4Addr}; use std::sync::Arc; @@ -45,7 +46,13 @@ async fn returns_200_ok() { listening_socket.port() ); - assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); + let response = reqwest::get(&url).await.unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.headers().get("Content-Type").unwrap(), + &HeaderValue::from_str("text/plain").unwrap() + ); } .await } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 4181d7f3c..f956d35d0 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -27,6 +27,7 @@ lighthouse_metrics = { path = 
"../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" +lru_cache = { path = "../../common/lru_cache" } parking_lot = "0.12.0" sha2 = "0.10" snap = "1.0.1" diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 9176f16f2..333a6cd91 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -8,6 +8,7 @@ use crate::{Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; +use lru_cache::LRUTimeCache; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; @@ -39,6 +40,9 @@ mod network_behaviour; /// requests. This defines the interval in seconds. const HEARTBEAT_INTERVAL: u64 = 30; +/// The minimum amount of time we allow peers to reconnect to us after a disconnect when we are +/// saturated with peers. This effectively looks like a swarm BAN for this amount of time. +pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600); /// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would /// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet /// peers. @@ -74,6 +78,20 @@ pub struct PeerManager { target_peers: usize, /// Peers queued to be dialed. peers_to_dial: VecDeque<(PeerId, Option)>, + /// The number of temporarily banned peers. This is used to prevent instantaneous + /// reconnection. + // NOTE: This just prevents re-connections. The state of the peer is otherwise unaffected. A + // peer can be in a disconnected state and new connections will be refused and logged as if the + // peer is banned without it being reflected in the peer's state. + // Also the banned state can out-last the peer's reference in the peer db. 
So peers that are + // unknown to us can still be temporarily banned. This is fundamentally a relationship with + // the swarm. Regardless of our knowledge of the peer in the db, it will be temporarily banned + // at the swarm layer. + // NOTE: An LRUTimeCache is used compared to a structure that needs to be polled to avoid very + // frequent polling to unban peers. Instead, this cache piggy-backs the PeerManager heartbeat + // to update and clear the cache. Therefore the PEER_RECONNECTION_TIMEOUT only has a resolution + // of the HEARTBEAT_INTERVAL. + temporary_banned_peers: LRUTimeCache, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -143,6 +161,7 @@ impl PeerManager { outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), target_peers: target_peer_count, + temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, @@ -243,6 +262,15 @@ impl PeerManager { reason: Option, ) { match ban_operation { + BanOperation::TemporaryBan => { + // The peer could be temporarily banned. We only do this in the case that + // we have currently reached our peer target limit. + if self.network_globals.connected_peers() >= self.target_peers { + // We have enough peers, prevent this reconnection. + self.temporary_banned_peers.raw_insert(*peer_id); + self.events.push(PeerManagerEvent::Banned(*peer_id, vec![])); + } + } BanOperation::DisconnectThePeer => { // The peer was currently connected, so we start a disconnection. 
// Once the peer has disconnected, its connection state will transition to a @@ -259,6 +287,11 @@ impl PeerManager { BanOperation::ReadyToBan(banned_ips) => { // The peer is not currently connected, we can safely ban it at the swarm // level. + + // If a peer is being banned, this trumps any temporary ban the peer might be + // under. We no longer track it in the temporary ban list. + self.temporary_banned_peers.raw_remove(peer_id); + // Inform the Swarm to ban the peer self.events .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); @@ -1121,6 +1154,14 @@ impl PeerManager { } } + /// Unbans any temporarily banned peers that have served their timeout. + fn unban_temporary_banned_peers(&mut self) { + for peer_id in self.temporary_banned_peers.remove_expired() { + self.events + .push(PeerManagerEvent::UnBanned(peer_id, Vec::new())); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -1153,6 +1194,9 @@ impl PeerManager { // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); + + // Unban any peers that have served their temporary ban timeout + self.unban_temporary_banned_peers(); } // Update metrics related to peer scoring. diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 42eb270c4..21288473e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -170,7 +170,7 @@ impl PeerManager { BanResult::NotBanned => {} } - // Count dialing peers in the limit if the peer dialied us. + // Count dialing peers in the limit if the peer dialed us. 
let count_dialing = endpoint.is_listener(); // Check the connection limits if self.peer_limit_reached(count_dialing) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 1f44488a5..70d3399d6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -844,8 +844,16 @@ impl PeerDB { .collect::>(); return Some(BanOperation::ReadyToBan(banned_ips)); } - PeerConnectionStatus::Disconnecting { .. } - | PeerConnectionStatus::Unknown + PeerConnectionStatus::Disconnecting { .. } => { + // The peer has been disconnected but not banned. Inform the peer manager + // that this peer could be eligible for a temporary ban. + self.disconnected_peers += 1; + info.set_connection_status(PeerConnectionStatus::Disconnected { + since: Instant::now(), + }); + return Some(BanOperation::TemporaryBan); + } + PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } | PeerConnectionStatus::Dialing { .. } => { self.disconnected_peers += 1; @@ -1177,6 +1185,9 @@ impl From> for ScoreUpdateResult { /// When attempting to ban a peer provides the peer manager with the operation that must be taken. pub enum BanOperation { + /// Optionally temporarily ban this peer to prevent instantaneous reconnection. + /// The peer manager will decide if temporary banning is required. + TemporaryBan, // The peer is currently connected. Perform a graceful disconnect before banning at the swarm // level. 
DisconnectThePeer, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index c0e30281b..f002000a5 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -22,8 +22,9 @@ use tokio_util::{ }; use types::BlobsSidecar; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, EthSpec, - ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockMerge, + EmptyBlock, EthSpec, ForkContext, ForkName, Hash256, MainnetEthSpec, Signature, + SignedBeaconBlock, }; lazy_static! { @@ -62,6 +63,13 @@ lazy_static! { .as_ssz_bytes() .len(); + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -72,11 +80,11 @@ lazy_static! 
{ + types::ExecutionPayload::::max_execution_payload_merge_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + pub static ref SIGNED_BEACON_BLOCK_CAPELLA_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field - pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_ALTAIR_MAX + pub static ref SIGNED_BEACON_BLOCK_EIP4844_MAX: usize = *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_eip4844_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` + (::ssz_fixed_len() * ::max_blobs_per_block()) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index ab7fb722b..b83b03d6b 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -23,9 +23,8 @@ pub const BLS_TO_EXECUTION_CHANGE_TOPIC: &str = "bls_to_execution_change"; pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const CORE_TOPICS: [GossipKind; 8] = [ +pub const CORE_TOPICS: [GossipKind; 7] = [ GossipKind::BeaconBlock, - GossipKind::BeaconBlocksAndBlobsSidecar, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 25f2830b0..73535dc83 100644 --- 
a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -1551,7 +1551,7 @@ impl BeaconProcessor { gossip_block_queue.len() as i64, ); metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, + &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); metrics::set_gauge( diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 9e1c9f51b..a407fe1bc 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -182,6 +182,7 @@ mod attestation_service { #[cfg(feature = "deterministic_long_lived_attnets")] use std::collections::HashSet; + #[cfg(not(windows))] use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -290,6 +291,7 @@ mod attestation_service { } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. 
+ #[cfg(not(windows))] #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config @@ -513,6 +515,7 @@ mod attestation_service { assert_eq!(unexpected_msg_count, 0); } + #[cfg(not(windows))] #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 65354e01a..35d2b4ce7 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -242,6 +242,20 @@ impl StoreItem for PersistedOperationPoolV14 { } } +impl StoreItem for PersistedOperationPoolV15 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index ac50cc6aa..c19ac0351 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -46,9 +46,8 @@ pub enum Error { }, BlockReplayError(BlockReplayError), AddPayloadLogicError, - ResyncRequiredForExecutionPayloadSeparation, SlotClockUnavailableForMigration, - V9MigrationFailure(Hash256), + UnableToDowngrade, InconsistentFork(InconsistentFork), } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fba0acad0..9762a0f59 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1340,6 +1340,11 @@ impl, Cold: ItemStore> HotColdDB &self.spec } + /// Get a reference to the `Logger` used by the database. 
+ pub fn logger(&self) -> &Logger { + &self.log + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 470407ebe..7def1821d 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -33,6 +33,11 @@ * [Authorization Header](./api-vc-auth-header.md) * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) +* [Lighthouse UI (Siren)](./lighthouse-ui.md) + * [Installation](./ui-installation.md) + * [Configuration](./ui-configuration.md) + * [Usage](./ui-usage.md) + * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 0982e10ab..7219a0f6b 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -26,10 +26,16 @@ validator client or the slasher**. | v3.1.0 | Sep 2022 | v12 | yes | | v3.2.0 | Oct 2022 | v12 | yes | | v3.3.0 | Nov 2022 | v13 | yes | +| v3.4.0 | Jan 2023 | v13 | yes | +| v3.5.0 | Feb 2023 | v15 | yes before Capella | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). +> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We +usually do this after a major version has been out for a while and everyone has upgraded. In this +case the above table will continue to record the deprecated schema changes for reference. + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. 
diff --git a/book/src/imgs/ui-account-earnings.png b/book/src/imgs/ui-account-earnings.png new file mode 100644 index 000000000..69e945603 Binary files /dev/null and b/book/src/imgs/ui-account-earnings.png differ diff --git a/book/src/imgs/ui-balance-modal.png b/book/src/imgs/ui-balance-modal.png new file mode 100644 index 000000000..9d7c0e36b Binary files /dev/null and b/book/src/imgs/ui-balance-modal.png differ diff --git a/book/src/imgs/ui-configuration.png b/book/src/imgs/ui-configuration.png new file mode 100644 index 000000000..87ec95d7b Binary files /dev/null and b/book/src/imgs/ui-configuration.png differ diff --git a/book/src/imgs/ui-dashboard.png b/book/src/imgs/ui-dashboard.png new file mode 100644 index 000000000..00552fe07 Binary files /dev/null and b/book/src/imgs/ui-dashboard.png differ diff --git a/book/src/imgs/ui-device.png b/book/src/imgs/ui-device.png new file mode 100644 index 000000000..0f1a0e77d Binary files /dev/null and b/book/src/imgs/ui-device.png differ diff --git a/book/src/imgs/ui-hardware.png b/book/src/imgs/ui-hardware.png new file mode 100644 index 000000000..4178687f5 Binary files /dev/null and b/book/src/imgs/ui-hardware.png differ diff --git a/book/src/imgs/ui-settings.png b/book/src/imgs/ui-settings.png new file mode 100644 index 000000000..da9cbca9f Binary files /dev/null and b/book/src/imgs/ui-settings.png differ diff --git a/book/src/imgs/ui-validator-balance1.png b/book/src/imgs/ui-validator-balance1.png new file mode 100644 index 000000000..4488cdd05 Binary files /dev/null and b/book/src/imgs/ui-validator-balance1.png differ diff --git a/book/src/imgs/ui-validator-balance2.png b/book/src/imgs/ui-validator-balance2.png new file mode 100644 index 000000000..44c5f5167 Binary files /dev/null and b/book/src/imgs/ui-validator-balance2.png differ diff --git a/book/src/imgs/ui-validator-management.png b/book/src/imgs/ui-validator-management.png new file mode 100644 index 000000000..b28b37210 Binary files /dev/null and 
b/book/src/imgs/ui-validator-management.png differ diff --git a/book/src/imgs/ui-validator-modal.png b/book/src/imgs/ui-validator-modal.png new file mode 100644 index 000000000..d53dc7809 Binary files /dev/null and b/book/src/imgs/ui-validator-modal.png differ diff --git a/book/src/imgs/ui-validator-table.png b/book/src/imgs/ui-validator-table.png new file mode 100644 index 000000000..10bcbd6f1 Binary files /dev/null and b/book/src/imgs/ui-validator-table.png differ diff --git a/book/src/imgs/ui.png b/book/src/imgs/ui.png new file mode 100644 index 000000000..208cb3101 Binary files /dev/null and b/book/src/imgs/ui.png differ diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 8e515a41b..c89dd1add 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -133,6 +133,15 @@ Commonly used features include: * `slasher-lmdb`: support for the LMDB slasher backend. * `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. +* `spec-minimal`: support for the minimal preset (useful for testing). + +Default features (e.g. `slasher-mdbx`) may be opted out of using the `--no-default-features` +argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. +E.g.
+ +``` +CARGO_INSTALL_EXTRA_FLAGS="--no-default-features" make +``` [jemalloc]: https://jemalloc.net/ diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md new file mode 100644 index 000000000..225f293f9 --- /dev/null +++ b/book/src/lighthouse-ui.md @@ -0,0 +1,33 @@ +# Lighthouse UI (Siren) + +_Documentation for Siren users and developers._ + +[![Chat Badge]][Chat Link] + +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh + +![ui-overview](./imgs/ui.png) + +Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and +a Lighthouse Validator Client to monitor performance and display key validator +metrics. + +The UI is currently in active development. It resides in the +[Siren](https://github.com/sigp/siren) repository. + +## Topics + +See the following Siren specific topics for more context-specific +information: + +- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. +- [Configuration Guide](./ui-configuration.md) - Explanation of how to set up + and configure Siren. +- [Usage](./ui-usage.md) - Details various Siren components. +- [FAQs](./ui-faqs.md) - Frequently Asked Questions. + +## Contributing + +If you find an issue or bug or would otherwise like to help out with the +development of the Siren project, please submit issues and PRs to the [Siren](https://github.com/sigp/siren) repository. diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md new file mode 100644 index 000000000..5b67b03b3 --- /dev/null +++ b/book/src/ui-configuration.md @@ -0,0 +1,47 @@ +# Configuration + +Siren requires a connection to both a Lighthouse Validator Client +and a Lighthouse Beacon Node. Upon running you will first be greeted by the +following configuration screen.
+ +![ui-configuration](./imgs/ui-configuration.png) + + +## Connecting to the Clients + +This allows you to enter the address and ports of the associated Lighthouse +Beacon node and Lighthouse Validator client. + +> The Beacon Node must be run with the `--gui` flag set. To allow the browser +> to access the node beyond your local computer you also need to allow CORS in +> the http API. This can be done via `--http-allow-origin "*"`. + +A green tick will appear once Siren is able to connect to both clients. You +can specify different ports for each client by clicking on the advanced tab. + + +## API Token + +The API Token is a secret key that allows you to connect to the validator +client. The validator client's HTTP API is guarded by this key because it +contains sensitive validator information and the ability to modify +validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) +for further details. + +Siren requires this token in order to connect to the Validator client. +The token is located in the default data directory of the validator +client. The default path is +`~/.lighthouse//validators/api-token.txt`. + +The contents of this file for the desired validator client need to be +entered. + +## Name + +This is your name, it can be modified and is solely used for aesthetics. + +## Device + +This is a name that can be associated with the validator client/beacon +node pair. Multiple such pairs can be remembered for quick swapping between +them. diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md new file mode 100644 index 000000000..240195421 --- /dev/null +++ b/book/src/ui-faqs.md @@ -0,0 +1,13 @@ +# Frequently Asked Questions + +## 1. Where can I find my API token? +The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./ui-configuration.md#api-token). + +## 2. How do I fix the Node Network Errors? 
+If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). + +## 3. How do I change my Beacon or Validator address after logging in? +Once you have successfully arrived at the main dashboard, use the sidebar to access the settings view. In the top right hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. + +## 4. Why doesn't my validator balance graph show any data? +If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md new file mode 100644 index 000000000..0b96b1923 --- /dev/null +++ b/book/src/ui-installation.md @@ -0,0 +1,103 @@ +# 📦 Installation + +Siren runs on Linux, MacOS and Windows. + + +## Pre-Built Electron Packages + +There are pre-compiled electron packages for each operating system which can +be downloaded and executed. These can be found on the +[releases](https://github.com/sigp/siren/releases) page of the +Siren repository. + +Simply download the package specific to your operating system and run it. + +## Building From Source + +### Requirements + +Building from source requires `Node v18` and `yarn`. 
+ +### Building From Source + +The electron app can be built from source by first cloning the repository and +entering the directory: + +``` +$ git clone https://github.com/sigp/siren.git +$ cd siren +``` + +Once cloned, the electron app can be built and run via the Makefile by: + +``` +$ make +``` + +Alternatively, it can be built via: + +``` +$ yarn +``` + +Once completed successfully the electron app can be run via: + +``` +$ yarn dev +``` + +### Running In The Browser + +#### Docker (Recommended) + +Docker is the recommended way to run a webserver that hosts Siren and can be +connected to via a web browser. We recommend this method as it establishes a +production-grade web-server to host the application. + +`docker` is required to be installed with the service running. + +The docker image can be built and run via the Makefile by running: +``` +$ make docker +``` + +Alternatively, to run with Docker, the image needs to be built. From the repository directory +run: +``` +$ docker build -t siren . +``` + +Then to run the image: +``` +$ docker run --rm -ti --name siren -p 80:80 siren +``` + +This will open port 80 and allow your browser to connect. You can choose +another local port by modifying the command. For example `-p 8000:80` will open +port 8000. + +To view Siren, simply go to `http://localhost` in your web browser. + +#### Development Server + +A development server can also be built which will expose a local port 3000 via: +``` +$ yarn start +``` + +Once executed, you can direct your web browser to the following URL to interact +with the app: +``` +http://localhost:3000 +``` + +A production version of the app can be built via +``` +$ yarn build +``` +and then further hosted via a production web server. + +### Known Issues + +If you experience any issues in running the UI please create an issue on the +[Lighthouse UI](https://github.com/sigp/lighthouse-ui) repository. 
diff --git a/book/src/ui-usage.md b/book/src/ui-usage.md new file mode 100644 index 000000000..e88c4677a --- /dev/null +++ b/book/src/ui-usage.md @@ -0,0 +1,61 @@ +# Usage + +# Dashboard + +Siren's dashboard view provides a summary of all performance and key validator metrics. Sync statuses, uptimes, accumulated rewards, hardware and network metrics are all consolidated on the dashboard for evaluation. + +![](imgs/ui-dashboard.png) + +## Account Earnings + +The account earnings component accumulates reward data from all registered validators providing a summation of total rewards earned while staking. Given current conversion rates, this component also converts your balance into your selected fiat currency. + +Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific timeframe based on current device and network conditions. + +![](imgs/ui-account-earnings.png) + +## Validator Table + +The validator table component is a list of all registered validators, which includes data such as name, index, total balance, earned rewards and current status. Each validator row also contains a link to a detailed data modal and additional data provided by [Beaconcha.in](https://beaconcha.in). + +![](imgs/ui-validator-table.png) + +## Validator Balance Chart + +The validator balance component is a graphical representation of each validator balance over the latest 10 epochs. Take note that only active validators are rendered in the chart visualization. + +![](imgs/ui-validator-balance1.png) + +By clicking on the chart component you can filter selected validators in the render. This allows for greater resolution in the rendered visualization. + + + + + + +## Hardware Usage and Device Diagnostics + +The hardware usage component gathers information about the device the Beacon Node is currently running on. It displays the Disk usage, CPU metrics and memory usage of the Beacon Node device. 
The device diagnostics component provides the sync status of the execution client and beacon node. + + + + + + +# Validator Management + +Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators. Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator. + +![](imgs/ui-validator-management.png) + +## Validator Modal + +Clicking the validator icon activates a detailed validator modal component. This component also allows users to trigger validator actions and as well to view and update validator graffiti. Each modal contains the validator total income with hourly, daily and weekly earnings estimates. + + + +# Settings + +Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters. + +![](imgs/ui-settings.png) diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index a03ba36ca..5d24d1203 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "3.4.0" +version = "3.5.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 45fc709cb..7e3c025a8 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -307,6 +307,5 @@ define_hardcoded_nets!( // Set to `true` if the genesis state can be found in the `built_in_network_configs` // directory. 
GENESIS_STATE_IS_KNOWN - ), - (eip4844, "eip4844", GENESIS_STATE_IS_KNOWN) + ) ); diff --git a/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt deleted file mode 100644 index 573541ac9..000000000 --- a/common/eth2_network_config/built_in_network_configs/eip4844/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 7b2d9c133..4ba006ec9 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -28,6 +28,10 @@ TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 +# Capella +CAPELLA_FORK_VERSION: 0x90000072 +CAPELLA_FORK_EPOCH: 56832 + # Eip4844 EIP4844_FORK_VERSION: 0x03001020 EIP4844_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip index 64aa1a5be..1321634ce 100644 Binary files a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip and b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip differ diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3c136b18b..8ad4aa86f 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.4.0-", - fallback = "Lighthouse/v3.4.0" + prefix = "Lighthouse/v3.5.0-", + 
fallback = "Lighthouse/v3.5.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 5c0e4c1ca..1253ef1ec 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -31,6 +31,77 @@ where } } + /// Inserts a key without removal of potentially expired elements. + /// Returns true if the key does not already exist. + pub fn raw_insert(&mut self, key: Key) -> bool { + // check the cache before removing elements + let is_new = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist. + if is_new { + self.list.push_back(Element { + key, + inserted: Instant::now(), + }); + } else { + let position = self + .list + .iter() + .position(|e| e.key == key) + .expect("Key is not new"); + let mut element = self + .list + .remove(position) + .expect("Position is not occupied"); + element.inserted = Instant::now(); + self.list.push_back(element); + } + #[cfg(test)] + self.check_invariant(); + is_new + } + + /// Removes a key from the cache without purging expired elements. Returns true if the key + /// existed. 
+ pub fn raw_remove(&mut self, key: &Key) -> bool { + if self.map.remove(key) { + let position = self + .list + .iter() + .position(|e| &e.key == key) + .expect("Key must exist"); + self.list + .remove(position) + .expect("Position is not occupied"); + true + } else { + false + } + } + + /// Removes all expired elements and returns them + pub fn remove_expired(&mut self) -> Vec { + if self.list.is_empty() { + return Vec::new(); + } + + let mut removed_elements = Vec::new(); + let now = Instant::now(); + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + removed_elements.push(element.key); + } + #[cfg(test)] + self.check_invariant(); + + removed_elements + } + // Inserts a new key. It first purges expired elements to do so. // // If the key was not present this returns `true`. If the value was already present this diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 296247fe9..61299f74a 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -1,6 +1,7 @@ use super::SlotClock; use parking_lot::RwLock; use std::convert::TryInto; +use std::sync::Arc; use std::time::Duration; use types::Slot; @@ -10,7 +11,7 @@ pub struct ManualSlotClock { /// Duration from UNIX epoch to genesis. genesis_duration: Duration, /// Duration from UNIX epoch to right now. - current_time: RwLock, + current_time: Arc>, /// The length of each slot. 
slot_duration: Duration, } @@ -20,7 +21,7 @@ impl Clone for ManualSlotClock { ManualSlotClock { genesis_slot: self.genesis_slot, genesis_duration: self.genesis_duration, - current_time: RwLock::new(*self.current_time.read()), + current_time: Arc::clone(&self.current_time), slot_duration: self.slot_duration, } } @@ -90,7 +91,7 @@ impl SlotClock for ManualSlotClock { Self { genesis_slot, - current_time: RwLock::new(genesis_duration), + current_time: Arc::new(RwLock::new(genesis_duration)), genesis_duration, slot_duration, } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ace198724..a613f14b6 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -413,18 +413,18 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; - // Default any non-merge execution block hashes to 0x000..000. - let execution_status = anchor_block.message_merge().map_or_else( - |()| ExecutionStatus::irrelevant(), - |message| { - let execution_payload = &message.body.execution_payload; - if execution_payload == &<_>::default() { + let execution_status = anchor_block.message().execution_payload().map_or_else( + // If the block doesn't have an execution payload then it can't have + // execution enabled. + |_| ExecutionStatus::irrelevant(), + |execution_payload| { + if execution_payload.is_default_with_empty_roots() { // A default payload does not have execution enabled. ExecutionStatus::irrelevant() } else { // Assume that this payload is valid, since the anchor should be a trusted block and // state. 
- ExecutionStatus::Valid(message.body.execution_payload.block_hash()) + ExecutionStatus::Valid(execution_payload.block_hash()) } }, ); @@ -857,8 +857,8 @@ where (parent_justified, parent_finalized) } else { let justification_and_finalization_state = match block { - // FIXME: verify this is correct for Capella/Eip4844 because - // epoch processing changes in Capella.. + // TODO(eip4844): Ensure that the final specification + // does not substantially modify per epoch processing. BeaconBlockRef::Eip4844(_) | BeaconBlockRef::Capella(_) | BeaconBlockRef::Merge(_) diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index ef1f113bb..3361f7509 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -176,6 +176,15 @@ impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { } } +impl IntoIterator for VariableList { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + impl tree_hash::TreeHash for VariableList where T: tree_hash::TreeHash, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index b2d7e0007..5fb4bf2ca 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -180,7 +180,15 @@ pub fn per_block_processing>( )?; } +<<<<<<< HEAD process_blob_kzg_commitments(block.body(), ctxt)?; +======= + // Eip4844 specifications are not yet released so additional care is taken + // to ensure the code does not run in production. 
+ if matches!(block, BeaconBlockRef::Eip4844(_)) { + process_blob_kzg_commitments(block.body())?; + } +>>>>>>> unstable Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 15a856c40..bb2679925 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -10,8 +10,8 @@ fn error(reason: Invalid) -> BlockOperationError { BlockOperationError::invalid(reason) } -/// Indicates if a `BlsToExecutionChange` is valid to be included in a block in the current epoch of the given -/// state. +/// Indicates if a `BlsToExecutionChange` is valid to be included in a block, +/// where the block is being applied to the given `state`. /// /// Returns `Ok(())` if the `SignedBlsToExecutionChange` is valid, otherwise indicates the reason for invalidity. pub fn verify_bls_to_execution_change( diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f960b2117..0f26cd0e5 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -485,6 +485,52 @@ impl> EmptyBlock for BeaconBlockMerg } } +impl> BeaconBlockCapella { + /// Return a Capella block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::zero(), + }, + signature: Signature::empty() + }; + T::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockCapella { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyCapella { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings: base_block.body.attester_slashings, + attestations: base_block.body.attestations, + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Capella::default(), + }, + } + } +} + impl> EmptyBlock for BeaconBlockCapella { /// Returns an empty Capella block to be used during genesis. 
fn empty(spec: &ChainSpec) -> Self { @@ -752,19 +798,65 @@ mod tests { }); } + #[test] + fn roundtrip_capella_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Capella.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockCapella { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyCapella::random_for_test(rng), + }; + let block = BeaconBlock::Capella(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + + #[test] + fn roundtrip_4844_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Eip4844.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockEip4844 { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyEip4844::random_for_test(rng), + }; + let block = BeaconBlock::Eip4844(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; - let spec = E::default_spec(); + let mut spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = spec.altair_fork_epoch.unwrap(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); - let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_epoch = altair_fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); - let altair_epoch = fork_epoch; + let altair_epoch = altair_fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + let capella_epoch = altair_fork_epoch + 1; + let capella_slot = 
capella_epoch.start_slot(E::slots_per_epoch()); + let eip4844_epoch = capella_epoch + 1; + let eip4844_slot = eip4844_epoch.start_slot(E::slots_per_epoch()); + + spec.altair_fork_epoch = Some(altair_epoch); + spec.capella_fork_epoch = Some(capella_epoch); + spec.eip4844_fork_epoch = Some(eip4844_epoch); // BeaconBlockBase { @@ -809,5 +901,49 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) .expect_err("bad altair block cannot be decoded"); } + + // BeaconBlockCapella + { + let good_block = BeaconBlock::Capella(BeaconBlockCapella { + slot: capella_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. + let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good capella block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad capella block cannot be decoded"); + } + + // BeaconBlockEip4844 + { + let good_block = BeaconBlock::Eip4844(BeaconBlockEip4844 { + slot: eip4844_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Capella block with a epoch lower than the fork epoch. 
+ let bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = capella_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good eip4844 block can be decoded"), + good_block + ); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad eip4844 block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index e70b88427..c98df48d1 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -710,7 +710,6 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } - // TODO: check this implementation /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs deleted file mode 100644 index dd3782d3c..000000000 --- a/consensus/types/src/free_attestation.rs +++ /dev/null @@ -1,13 +0,0 @@ -/// Note: this object does not actually exist in the spec. -/// -/// We use it for managing attestations that have not been aggregated. 
-use super::{AttestationData, Signature}; -use serde_derive::Serialize; - -#[derive(arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize)] -pub struct FreeAttestation { - pub data: AttestationData, - pub signature: Signature, - #[serde(with = "eth2_serde_utils::quoted_u64")] - pub validator_index: u64, -} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 4e1951222..380f5e527 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -47,7 +47,6 @@ pub mod fork; pub mod fork_data; pub mod fork_name; pub mod fork_versioned_response; -pub mod free_attestation; pub mod graffiti; pub mod historical_batch; pub mod historical_summary; @@ -154,7 +153,6 @@ pub use crate::fork_name::{ForkName, InconsistentFork}; pub use crate::fork_versioned_response::{ ExecutionOptimisticForkVersionedResponse, ForkVersionDeserialize, ForkVersionedResponse, }; -pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 821f5c57e..e877198db 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.4.0" +version = "3.5.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index cc4366059..eb46faa53 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.4.0" +version = "3.5.0" authors = ["Sigma Prime "] edition = "2021" autotests = false @@ -55,7 +55,7 @@ malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } -slasher = { path = "../slasher" } +slasher = { path = "../slasher", 
default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index da1f5036d..a677abf86 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1457,7 +1457,7 @@ fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("11.25")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config.slasher.as_ref().unwrap(); assert_eq!(slasher_config.slot_offset, 11.25); @@ -1469,7 +1469,7 @@ fn slasher_slot_offset_nan_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("NaN")) - .run(); + .run_with_zero_port(); } #[test] fn slasher_history_length_flag() { @@ -1504,7 +1504,7 @@ fn slasher_attestation_cache_size_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-att-cache-size", Some("10000")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config .slasher @@ -1608,23 +1608,25 @@ fn ensure_panic_on_failed_launch() { #[test] fn enable_proposer_re_orgs_default() { - CommandLineTest::new().run().with_config(|config| { - assert_eq!( - config.chain.re_org_threshold, - Some(DEFAULT_RE_ORG_THRESHOLD) - ); - assert_eq!( - config.chain.re_org_max_epochs_since_finalization, - DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - ); - }); + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + }); } #[test] fn disable_proposer_re_orgs() { CommandLineTest::new() .flag("disable-proposer-reorgs", None) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); } @@ -1632,7 +1634,7 @@ fn disable_proposer_re_orgs() { fn proposer_re_org_threshold() { 
CommandLineTest::new() .flag("proposer-reorg-threshold", Some("90")) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); } @@ -1640,7 +1642,7 @@ fn proposer_re_org_threshold() { fn proposer_re_org_max_epochs_since_finalization() { CommandLineTest::new() .flag("proposer-reorg-epochs-since-finalization", Some("8")) - .run() + .run_with_zero_port() .with_config(|config| { assert_eq!( config.chain.re_org_max_epochs_since_finalization.as_u64(), diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 9a7c22ea5..dcdf671e3 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -55,7 +55,7 @@ SECONDS_PER_SLOT=3 SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage -PROPOSER_SCORE_BOOST=70 +PROPOSER_SCORE_BOOST=40 # Command line arguments for validator client VC_ARGS="" diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index b0f9ce826..95dfff569 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -2,6 +2,7 @@ # Requires `lighthouse`, ``lcli`, `ganache`, `curl`, `jq` + BEHAVIOR=$1 if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then @@ -9,13 +10,22 @@ if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then exit 1 fi +exit_if_fails() { + echo $@ + $@ + EXIT_CODE=$? + if [[ $EXIT_CODE -eq 1 ]]; then + exit 111 + fi +} + source ./vars.env -../local_testnet/clean.sh +exit_if_fails ../local_testnet/clean.sh echo "Starting ganache" -../local_testnet/ganache_test_node.sh &> /dev/null & +exit_if_fails ../local_testnet/ganache_test_node.sh &> /dev/null & GANACHE_PID=$! 
# Wait for ganache to start @@ -23,14 +33,14 @@ sleep 5 echo "Setting up local testnet" -../local_testnet/setup.sh +exit_if_fails ../local_testnet/setup.sh # Duplicate this directory so slashing protection doesn't keep us from re-using validator keys -cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger +exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger echo "Starting bootnode" -../local_testnet/bootnode.sh &> /dev/null & +exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & BOOT_PID=$! # wait for the bootnode to start @@ -38,20 +48,20 @@ sleep 10 echo "Starting local beacon nodes" -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 &> /dev/null & BEACON_PID=$! -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 &> /dev/null & BEACON_PID2=$! -../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 &> /dev/null & BEACON_PID3=$! echo "Starting local validator clients" -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000 &> /dev/null & VALIDATOR_1_PID=$! -../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8100 &> /dev/null & VALIDATOR_2_PID=$! 
-../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & +exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:8200 &> /dev/null & VALIDATOR_3_PID=$! echo "Waiting an epoch before starting the next validator client" @@ -73,9 +83,14 @@ if [[ "$BEHAVIOR" == "failure" ]]; then echo "Done" - if [[ $DOPPELGANGER_EXIT -eq 124 ]]; then + # We expect to find a doppelganger, exit with success error code if doppelganger was found + # and failure if no doppelganger was found. + if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then + exit 0 + else exit 1 fi + fi if [[ "$BEHAVIOR" == "success" ]]; then diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 376fe3d8c..778a0afca 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 +# Proposer score boost percentage +PROPOSER_SCORE_BOOST=40 + # Enable doppelganger detection VC_ARGS=" --enable-doppelganger-protection " diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 63cf1e464..0a787defa 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -9,7 +9,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } directory = { path = "../../common/directory" } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } network = { path = "../../beacon_node/network" } -slasher = { path = ".." 
} +slasher = { path = "..", default-features = false } slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 1feba41c8..fc3dea6e2 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.3.0-rc.1 +TESTS_TAG := v1.3.0-rc.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 10c22f0a9..0ca823853 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -53,9 +53,11 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2" ] + def normalize_path(path): return path.split("consensus-spec-tests/")[1] + # Determine the list of filenames which were accessed during tests. passed = set() for line in open(accessed_files_filename, 'r').readlines(): @@ -88,4 +90,5 @@ for root, dirs, files in os.walk(tests_dir_filename): # Exit with an error if there were any files missed. assert len(missed) == 0, "{} missed files".format(len(missed)) -print("Accessed {} files ({} intentionally excluded)".format(accessed_files, excluded_files)) +print("Accessed {} files ({} intentionally excluded)".format( + accessed_files, excluded_files)) diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 3ed81977f..5464d6cfb 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -653,6 +653,11 @@ impl Handler for MerkleProofValidityHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { fork_name != ForkName::Base + // Test is skipped due to some changes in the Capella light client + // spec. 
+ // + // https://github.com/sigp/lighthouse/issues/4022 + && fork_name != ForkName::Capella } } diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 312a55ea5..de3085d22 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,6 +21,4 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } - -[features] -default = [] \ No newline at end of file +logging = { path = "../../common/logging" } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 1b96fa9f3..5a1a5d4f5 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,7 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const GETH_BRANCH: &str = "master"; +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -27,7 +27,9 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // TODO: Update when version is corrected + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + let last_release = "v1.11.1"; build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index fe7e51e92..bb416634e 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -100,7 +100,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: 
&[&str], password: impl TestRig { pub fn new(generic_engine: E) -> Self { - let log = environment::null_logger().unwrap(); + let log = logging::test_logger(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -270,6 +270,8 @@ impl TestRig { }; let proposer_index = 0; + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. let prepared = self .ee_a .execution_layer @@ -278,7 +280,7 @@ impl TestRig { head_root, proposer_index, // TODO: think about how to test different forks - PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None), + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), ) .await; @@ -334,6 +336,7 @@ impl TestRig { .unwrap() .to_payload() .execution_payload(); + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); /* * Execution Engine A: @@ -398,7 +401,6 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); - assert_eq!(valid_payload.transactions().len(), pending_txs.len()); // Verify that all submitted txs were successful for pending_tx in pending_txs { @@ -489,8 +491,10 @@ impl TestRig { let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); // TODO: think about how to handle different forks + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. 
let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None); + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index c30d60344..31337491e 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -121,7 +121,13 @@ pub fn serve( .and_then(|ctx: Arc>| async move { Ok::<_, warp::Rejection>( metrics::gather_prometheus_metrics(&ctx) - .map(|body| Response::builder().status(200).body(body).unwrap()) + .map(|body| { + Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(body) + .unwrap() + }) .unwrap_or_else(|e| { Response::builder() .status(500)