diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index dac892139..f2ccaf438 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -59,7 +59,7 @@ jobs: x86_64-portable] features: [ {version_suffix: "", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc"}, - {version_suffix: "-dev", env: "gnosis,slasher-lmdb,slasher-mdbx,jemalloc,spec-minimal"} + {version_suffix: "-dev", env: "jemalloc,spec-minimal"} ] include: - profile: maxperf @@ -71,8 +71,6 @@ jobs: VERSION: ${{ needs.extract-version.outputs.VERSION }} VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }} FEATURE_SUFFIX: ${{ matrix.features.version_suffix }} - FEATURES: ${{ matrix.features.env }} - CROSS_FEATURES: ${{ matrix.features.env }} steps: - uses: actions/checkout@v3 - name: Update Rust @@ -112,7 +110,6 @@ jobs: --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX}${FEATURE_SUFFIX} \ - --build-arg FEATURES=${FEATURES} \ --provenance=false \ --push build-docker-multiarch: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 57fee7183..5ecd5efe3 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -280,7 +280,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '1.17' + go-version: '1.20' - uses: actions/setup-dotnet@v3 with: dotnet-version: '6.0.201' diff --git a/Cargo.lock b/Cargo.lock index 1d8d88f38..f52ab165f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.5", + "itoa", "matchit", "memchr", "mime", @@ -815,18 +815,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -1358,13 +1346,12 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.6" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -1435,9 +1422,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9" +checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" dependencies = [ "cc", "cxxbridge-flags", @@ -1447,9 +1434,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d" +checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" dependencies = [ "cc", "codespan-reporting", @@ -1462,15 +1449,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.89" +version = "1.0.90" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a" +checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" [[package]] name = "cxxbridge-macro" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2" +checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" dependencies = [ "proc-macro2", "quote", @@ -1829,7 +1816,7 @@ dependencies = [ "enr", "fnv", "futures", - "hashlink", + "hashlink 0.7.0", "hex", "hkdf", "lazy_static", @@ -2480,6 +2467,7 @@ dependencies = [ "fork_choice", "futures", "hex", + "logging", "reqwest", "sensitive_url", "serde_json", @@ -2563,9 +2551,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -2987,7 +2975,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tracing", ] @@ -3048,6 +3036,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "hashlink" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "headers" version = "0.3.8" @@ -3187,7 +3184,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.5", + "itoa", ] [[package]] @@ -3306,7 +3303,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -3470,7 +3467,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", ] [[package]] @@ -3597,12 +3594,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.5" @@ -4222,7 +4213,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "webrtc", ] @@ -4309,9 +4300,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -4398,6 +4389,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", + "lru_cache", "parking_lot 0.12.1", "prometheus-client", "quickcheck", @@ -4740,14 +4732,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = 
"5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -5233,9 +5225,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oneshot_broadcast" @@ -5414,9 +5406,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" +checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -5856,7 +5848,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.5", + "itoa", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -6058,9 +6050,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d24607049214c5e42d3df53ac1d8a23c34cc6a5eefe3122acb2c72174719959" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ "r2d2", "rusqlite", @@ -6293,7 +6285,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.23.4", - "tokio-util 0.7.4", + "tokio-util 0.7.7", "tower-service", "url", "wasm-bindgen", @@ -6419,16 +6411,15 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.8.1", "libsqlite3-sys", - "memchr", "smallvec", ] @@ -6579,7 +6570,7 @@ checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.3.0", + "parity-scale-codec 3.4.0", "scale-info-derive", ] @@ -6835,11 +6826,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -6862,7 +6853,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -6999,9 +6990,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" 
dependencies = [ "libc", ] @@ -7700,10 +7691,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -7733,7 +7725,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 1.0.5", + "itoa", "libc", "num_threads", "serde", @@ -7902,7 +7894,7 @@ dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.7", ] [[package]] @@ -7952,9 +7944,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -9000,9 +8992,9 @@ dependencies = [ [[package]] name = "webrtc-ice" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" dependencies = [ "arc-swap", "async-trait", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 4181d7f3c..f956d35d0 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -27,6 +27,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" +lru_cache = { path = "../../common/lru_cache" } parking_lot = "0.12.0" sha2 = "0.10" snap = "1.0.1" diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index b89ee64f1..d9ea0dea0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -8,6 +8,7 @@ use crate::{Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; +use lru_cache::LRUTimeCache; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; @@ -39,6 +40,9 @@ mod network_behaviour; /// requests. This defines the interval in seconds. const HEARTBEAT_INTERVAL: u64 = 30; +/// The minimum amount of time we allow peers to reconnect to us after a disconnect when we are +/// saturated with peers. This effectively looks like a swarm BAN for this amount of time. +pub const PEER_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(600); /// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would /// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet /// peers. @@ -74,6 +78,20 @@ pub struct PeerManager { target_peers: usize, /// Peers queued to be dialed. peers_to_dial: VecDeque<(PeerId, Option)>, + /// The number of temporarily banned peers. This is used to prevent instantaneous + /// reconnection. + // NOTE: This just prevents re-connections. 
The state of the peer is otherwise unaffected. A + // peer can be in a disconnected state and new connections will be refused and logged as if the + // peer is banned without it being reflected in the peer's state. + // Also the banned state can outlast the peer's reference in the peer db. So peers that are + // unknown to us can still be temporarily banned. This is fundamentally a relationship with + // the swarm. Regardless of our knowledge of the peer in the db, it will be temporarily banned + // at the swarm layer. + // NOTE: An LRUTimeCache is used rather than a structure that needs to be polled, to avoid very + // frequent polling to unban peers. Instead, this cache piggy-backs on the PeerManager heartbeat + // to update and clear the cache. Therefore the PEER_RECONNECTION_TIMEOUT only has a resolution + // of the HEARTBEAT_INTERVAL. + temporary_banned_peers: LRUTimeCache<PeerId>, /// A collection of sync committee subnets that we need to stay subscribed to. /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run /// discovery queries for subnet peers if we disconnect from existing sync @@ -143,6 +161,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> { outbound_ping_peers: HashSetDelay::new(Duration::from_secs(ping_interval_outbound)), status_peers: HashSetDelay::new(Duration::from_secs(status_interval)), target_peers: target_peer_count, + temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, @@ -243,6 +262,15 @@ impl<TSpec: EthSpec> PeerManager<TSpec> { reason: Option, ) { match ban_operation { + BanOperation::TemporaryBan => { + // The peer could be temporarily banned. We only do this in the case that + // we have currently reached our peer target limit. + if self.network_globals.connected_peers() >= self.target_peers { + // We have enough peers, prevent this reconnection. + self.temporary_banned_peers.raw_insert(*peer_id); + self.events.push(PeerManagerEvent::Banned(*peer_id, vec![])); + } + } BanOperation::DisconnectThePeer => { // The peer was currently connected, so we start a disconnection. // Once the peer has disconnected, its connection state will transition to a @@ -259,6 +287,11 @@ BanOperation::ReadyToBan(banned_ips) => { // The peer is not currently connected, we can safely ban it at the swarm // level. + + // If a peer is being banned, this trumps any temporary ban the peer might be + // under. We no longer track it in the temporary ban list. + self.temporary_banned_peers.raw_remove(peer_id); + + // Inform the Swarm to ban the peer self.events .push(PeerManagerEvent::Banned(*peer_id, banned_ips)); @@ -1112,6 +1145,14 @@ } } + /// Unbans any temporarily banned peers that have served their timeout. + fn unban_temporary_banned_peers(&mut self) { + for peer_id in self.temporary_banned_peers.remove_expired() { + self.events + .push(PeerManagerEvent::UnBanned(peer_id, Vec::new())); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -1144,6 +1185,9 @@ // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); + + // Unban any peers that have served their temporary ban timeout + self.unban_temporary_banned_peers(); } // Update metrics related to peer scoring.
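Aside: a minimal sketch of how the temporary-ban bookkeeping above is driven, using the `LRUTimeCache` API added in `common/lru_cache/src/time.rs` further down. It assumes the `lru_cache` crate from `common/lru_cache` is on the dependency path and stubs `PeerId` as a `String` purely for illustration (Lighthouse uses libp2p's `PeerId`):

```rust
// Illustrative sketch only, not part of the patch. Mirrors how PeerManager uses the
// LRUTimeCache methods introduced in this diff (raw_insert / raw_remove / remove_expired).
use std::time::Duration;

use lru_cache::LRUTimeCache;

type PeerId = String; // stand-in for libp2p::PeerId

fn main() {
    // Mirrors `LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT)`.
    let mut temporary_banned_peers: LRUTimeCache<PeerId> =
        LRUTimeCache::new(Duration::from_secs(600));

    // On `BanOperation::TemporaryBan`, and only when we are saturated with peers:
    let peer: PeerId = "peer-1".into();
    if temporary_banned_peers.raw_insert(peer.clone()) {
        // ...emit PeerManagerEvent::Banned(peer, vec![]) so the swarm refuses reconnections.
    }

    // On each heartbeat (HEARTBEAT_INTERVAL = 30s), expired entries are drained, so
    // PEER_RECONNECTION_TIMEOUT effectively has heartbeat-interval resolution.
    for unbanned_peer in temporary_banned_peers.remove_expired() {
        // ...emit PeerManagerEvent::UnBanned(unbanned_peer, Vec::new()).
        let _ = unbanned_peer;
    }

    // A full ban (`BanOperation::ReadyToBan`) trumps the temporary ban.
    temporary_banned_peers.raw_remove(&peer);
}
```

Because the cache is only drained inside the heartbeat, a temporary ban can last up to one `HEARTBEAT_INTERVAL` longer than `PEER_RECONNECTION_TIMEOUT`.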
diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 42eb270c4..21288473e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -170,7 +170,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> { BanResult::NotBanned => {} } - // Count dialing peers in the limit if the peer dialied us. + // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits if self.peer_limit_reached(count_dialing) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 1f44488a5..61cf8de1c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -844,8 +844,12 @@ impl PeerDB { .collect::<Vec<_>>(); return Some(BanOperation::ReadyToBan(banned_ips)); } - PeerConnectionStatus::Disconnecting { .. } - | PeerConnectionStatus::Unknown + PeerConnectionStatus::Disconnecting { .. } => { + // The peer has been disconnected but not banned. Inform the peer manager + // that this peer could be eligible for a temporary ban. + return Some(BanOperation::TemporaryBan); + } + PeerConnectionStatus::Unknown | PeerConnectionStatus::Connected { .. } | PeerConnectionStatus::Dialing { .. } => { self.disconnected_peers += 1; @@ -1177,6 +1181,9 @@ impl From> for ScoreUpdateResult { /// When attempting to ban a peer provides the peer manager with the operation that must be taken. pub enum BanOperation { + /// Optionally temporarily ban this peer to prevent instantaneous reconnection. + /// The peer manager will decide if temporary banning is required. + TemporaryBan, // The peer is currently connected. Perform a graceful disconnect before banning at the swarm // level. DisconnectThePeer, diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 9e1c9f51b..a407fe1bc 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -182,6 +182,7 @@ mod attestation_service { #[cfg(feature = "deterministic_long_lived_attnets")] use std::collections::HashSet; + #[cfg(not(windows))] use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -290,6 +291,7 @@ mod attestation_service { } /// Test to verify that we are not unsubscribing to a subnet before a required subscription. + #[cfg(not(windows))] #[tokio::test] async fn test_same_subnet_unsubscription() { // subscription config @@ -513,6 +515,7 @@ mod attestation_service { assert_eq!(unexpected_msg_count, 0); } + #[cfg(not(windows))] #[tokio::test] async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 5c0e4c1ca..1253ef1ec 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -31,6 +31,77 @@ where } } + /// Inserts a key without removal of potentially expired elements. + /// Returns true if the key does not already exist. + pub fn raw_insert(&mut self, key: Key) -> bool { + // update the map without first removing expired elements + let is_new = self.map.insert(key.clone()); + + // add the new key to the list, if it doesn't already exist.
+ if is_new { + self.list.push_back(Element { + key, + inserted: Instant::now(), + }); + } else { + let position = self + .list + .iter() + .position(|e| e.key == key) + .expect("Key is not new"); + let mut element = self + .list + .remove(position) + .expect("Position is not occupied"); + element.inserted = Instant::now(); + self.list.push_back(element); + } + #[cfg(test)] + self.check_invariant(); + is_new + } + + /// Removes a key from the cache without purging expired elements. Returns true if the key + /// existed. + pub fn raw_remove(&mut self, key: &Key) -> bool { + if self.map.remove(key) { + let position = self + .list + .iter() + .position(|e| &e.key == key) + .expect("Key must exist"); + self.list + .remove(position) + .expect("Position is not occupied"); + true + } else { + false + } + } + + /// Removes all expired elements and returns them + pub fn remove_expired(&mut self) -> Vec<Key> { + if self.list.is_empty() { + return Vec::new(); + } + + let mut removed_elements = Vec::new(); + let now = Instant::now(); + // remove any expired results + while let Some(element) = self.list.pop_front() { + if element.inserted + self.ttl > now { + self.list.push_front(element); + break; + } + self.map.remove(&element.key); + removed_elements.push(element.key); + } + #[cfg(test)] + self.check_invariant(); + + removed_elements + } + // Inserts a new key. It first purges expired elements to do so. // // If the key was not present this returns `true`. If the value was already present this diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 296247fe9..61299f74a 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -1,6 +1,7 @@ use super::SlotClock; use parking_lot::RwLock; use std::convert::TryInto; +use std::sync::Arc; use std::time::Duration; use types::Slot; @@ -10,7 +11,7 @@ pub struct ManualSlotClock { /// Duration from UNIX epoch to genesis. genesis_duration: Duration, /// Duration from UNIX epoch to right now. - current_time: RwLock<Duration>, + current_time: Arc<RwLock<Duration>>, /// The length of each slot.
slot_duration: Duration, } @@ -20,7 +21,7 @@ impl Clone for ManualSlotClock { ManualSlotClock { genesis_slot: self.genesis_slot, genesis_duration: self.genesis_duration, - current_time: RwLock::new(*self.current_time.read()), + current_time: Arc::clone(&self.current_time), slot_duration: self.slot_duration, } } @@ -90,7 +91,7 @@ impl SlotClock for ManualSlotClock { Self { genesis_slot, - current_time: RwLock::new(genesis_duration), + current_time: Arc::new(RwLock::new(genesis_duration)), genesis_duration, slot_duration, } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index aad538968..276077c7f 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -38,7 +38,7 @@ cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" -rusqlite = { version = "0.25.3", features = ["bundled"], optional = true } +rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } # The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by # `AbstractExecPayload` arbitrary = { version = "1.0", features = ["derive"] } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 053a04f87..a07502c58 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1418,7 +1418,7 @@ fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("11.25")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config.slasher.as_ref().unwrap(); assert_eq!(slasher_config.slot_offset, 11.25); @@ -1430,7 +1430,7 @@ fn slasher_slot_offset_nan_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-slot-offset", Some("NaN")) - .run(); + .run_with_zero_port(); } #[test] fn slasher_history_length_flag() { @@ -1465,7 +1465,7 @@ fn slasher_attestation_cache_size_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-att-cache-size", Some("10000")) - .run() + .run_with_zero_port() .with_config(|config| { let slasher_config = config .slasher @@ -1569,23 +1569,25 @@ fn ensure_panic_on_failed_launch() { #[test] fn enable_proposer_re_orgs_default() { - CommandLineTest::new().run().with_config(|config| { - assert_eq!( - config.chain.re_org_threshold, - Some(DEFAULT_RE_ORG_THRESHOLD) - ); - assert_eq!( - config.chain.re_org_max_epochs_since_finalization, - DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, - ); - }); + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + }); } #[test] fn disable_proposer_re_orgs() { CommandLineTest::new() .flag("disable-proposer-reorgs", None) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); } @@ -1593,7 +1595,7 @@ fn disable_proposer_re_orgs() { fn proposer_re_org_threshold() { CommandLineTest::new() .flag("proposer-reorg-threshold", Some("90")) - .run() + .run_with_zero_port() .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); } @@ -1601,7 +1603,7 @@ fn proposer_re_org_threshold() { fn proposer_re_org_max_epochs_since_finalization() { CommandLineTest::new() .flag("proposer-reorg-epochs-since-finalization", Some("8")) - .run() + .run_with_zero_port() .with_config(|config| { assert_eq!( 
config.chain.re_org_max_epochs_since_finalization.as_u64(), diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 312a55ea5..de3085d22 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -21,6 +21,4 @@ deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" fork_choice = { path = "../../consensus/fork_choice" } - -[features] -default = [] \ No newline at end of file +logging = { path = "../../common/logging" } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index fe7e51e92..bb416634e 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -100,7 +100,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: impl TestRig { pub fn new(generic_engine: E) -> Self { - let log = environment::null_logger().unwrap(); + let log = logging::test_logger(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -270,6 +270,8 @@ impl TestRig { }; let proposer_index = 0; + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. let prepared = self .ee_a .execution_layer @@ -278,7 +280,7 @@ impl TestRig { head_root, proposer_index, // TODO: think about how to test different forks - PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None), + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None), ) .await; @@ -334,6 +336,7 @@ impl TestRig { .unwrap() .to_payload() .execution_payload(); + assert_eq!(valid_payload.transactions().len(), pending_txs.len()); /* * Execution Engine A: @@ -398,7 +401,6 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); - assert_eq!(valid_payload.transactions().len(), pending_txs.len()); // Verify that all submitted txs were successful for pending_tx in pending_txs { @@ -489,8 +491,10 @@ impl TestRig { let head_block_hash = valid_payload.block_hash(); let finalized_block_hash = ExecutionBlockHash::zero(); // TODO: think about how to handle different forks + // To save sending proposer preparation data, just set the fee recipient + // to the fee recipient configured for EE A. 
let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::zero(), None); + PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); let validator_index = 0; diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 55e7f3f71..631e54dc4 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -12,9 +12,9 @@ path = "tests/main.rs" [dependencies] tempfile = "3.1.0" types = { path = "../../consensus/types" } -rusqlite = { version = "0.25.3", features = ["bundled"] } +rusqlite = { version = "0.28.0", features = ["bundled"] } r2d2 = "0.8.9" -r2d2_sqlite = "0.18.0" +r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index bd5f97f4d..c8be85147 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -162,8 +162,8 @@ impl SlashingDatabase { /// The exclusive locking mode also has the benefit of applying to other processes, so multiple /// Lighthouse processes trying to access the same database will also be blocked. fn apply_pragmas(conn: &mut rusqlite::Connection) -> Result<(), rusqlite::Error> { - conn.pragma_update(None, "foreign_keys", &true)?; - conn.pragma_update(None, "locking_mode", &"EXCLUSIVE")?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?; Ok(()) }
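Aside on the last hunk: the pragma change follows the rusqlite upgrade; as far as I recall, `pragma_update` in rusqlite 0.28 takes the pragma value by value (generic over `ToSql`) rather than as `&dyn ToSql`, which is why the `&true` and `&"EXCLUSIVE"` borrows are dropped. A minimal standalone sketch of the updated calls, assuming the `bundled` SQLite feature used elsewhere in this diff:

```rust
// Illustrative sketch only, not part of the patch: the same pragmas applied to a
// throwaway in-memory database with rusqlite 0.28.
use rusqlite::Connection;

fn apply_pragmas(conn: &mut Connection) -> Result<(), rusqlite::Error> {
    // Pragma values are passed by value and converted via `ToSql`.
    conn.pragma_update(None, "foreign_keys", true)?;
    conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
    Ok(())
}

fn main() -> Result<(), rusqlite::Error> {
    let mut conn = Connection::open_in_memory()?;
    apply_pragmas(&mut conn)?;
    Ok(())
}
```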