Revert "Optimise HTTP validator lookups" (#3658)
## Issue Addressed

This reverts commit ca9dc8e094 (PR #3559) with some modifications.

## Proposed Changes

Unfortunately that PR introduced a performance regression in fork choice. The optimisation _intended_ to build the exit and pubkey caches on the head state _only if_ they were not already built. However, due to the head state always being cloned without these caches, we ended up building them every time the head changed, leading to a ~70ms+ penalty on mainnet.

fcfd02aeec/beacon_node/beacon_chain/src/canonical_head.rs (L633-L636)
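To make the failure mode concrete, here is a minimal toy sketch in plain Rust. It is not Lighthouse's actual types or code paths: `State`, `PubkeyCache`, `clone_without_caches`, and the 500k-validator figure are illustrative assumptions. It shows how a "build only if missing" check degenerates into "rebuild on every head change" when the state it receives is always a clone that dropped its caches:

```rust
// Toy model of the regression: the cache is built lazily ("only if missing"),
// but the state handed in is always a fresh clone that dropped the cache,
// so the "already built" branch is never taken.

use std::collections::HashMap;

#[derive(Clone, Default)]
struct PubkeyCache {
    map: HashMap<u64, usize>, // toy pubkey -> validator index mapping
}

#[derive(Default)]
struct State {
    validators: Vec<u64>,      // toy "pubkeys"
    pubkey_cache: PubkeyCache, // expensive to rebuild for a mainnet-sized registry
}

impl State {
    /// Clone the state but *not* its caches (mirrors a head state being
    /// cloned without the pubkey/exit caches).
    fn clone_without_caches(&self) -> State {
        State {
            validators: self.validators.clone(),
            pubkey_cache: PubkeyCache::default(),
        }
    }

    /// "Build only if missing" — correct in isolation, but if the input is
    /// always a cache-less clone, this rebuilds the cache on every call.
    fn build_pubkey_cache_if_missing(&mut self) {
        if self.pubkey_cache.map.len() != self.validators.len() {
            // O(n) rebuild: the expensive step that ends up running every time.
            self.pubkey_cache.map = self
                .validators
                .iter()
                .enumerate()
                .map(|(i, pk)| (*pk, i))
                .collect();
        }
    }
}

fn main() {
    let mut head = State {
        validators: (0..500_000).collect(),
        ..Default::default()
    };
    head.build_pubkey_cache_if_missing(); // built once on the head ...

    // ... but every "new head" handed out is a clone without the cache,
    // so the guard above never sees a warm cache and always rebuilds.
    let mut new_head = head.clone_without_caches();
    new_head.build_pubkey_cache_if_missing(); // rebuilds from scratch again
}
```

In this shape the `if` guard never observes a warm cache, so the O(n) rebuild runs on every head change — the same pattern behind the ~70ms+ penalty described above.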
I believe this is a severe enough regression to justify immediately releasing v3.2.1 with this change.

## Additional Info

I didn't fully revert #3559, because there were some unrelated deletions of dead code in that PR which I figured we may as well keep. An alternative would be to clone the extra caches, but this likely still imposes some cost, so in the interest of applying a conservative fix quickly, I think reversion is the best approach. The optimisation from #3559 was not even optimising a particularly significant path; it was mostly for VCs running larger numbers of inactive keys. We can re-do it in the `tree-states` world, where cache clones are cheap.
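For completeness, a sketch of the alternative mentioned above, extending the toy `State` from the previous sketch (again an illustration under assumed names, not Lighthouse code): cloning the extra caches avoids the rebuild, but a full copy of the cache is still paid on every head change, which is why a straight revert is the more conservative fix until `tree-states` makes such clones cheap.

```rust
impl State {
    /// Alternative fix: carry the cache across the clone instead of dropping it.
    /// The O(n) rebuild is avoided, but the cache itself is still copied wholesale
    /// on every head change, so some cost remains.
    fn clone_with_caches(&self) -> State {
        State {
            validators: self.validators.clone(),
            pubkey_cache: self.pubkey_cache.clone(),
        }
    }
}
```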
parent fcfd02aeec
commit 77eabc5401
```diff
@@ -654,11 +654,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     })
             })
             .and_then(|mut snapshot| {
-                // Regardless of where we got the state from, attempt to build all the
-                // caches except the tree hash cache.
+                // Regardless of where we got the state from, attempt to build the committee
+                // caches.
                 snapshot
                     .beacon_state
-                    .build_all_caches(&self.spec)
+                    .build_all_committee_caches(&self.spec)
                     .map_err(Into::into)
                     .map(|()| snapshot)
             })?;
```
```diff
@@ -668,10 +668,9 @@ pub fn serve<T: BeaconChainTypes>(
                 "Invalid validator ID".to_string(),
             ))
         }))
-        .and(log_filter.clone())
         .and(warp::path::end())
         .and_then(
-            |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId, log| {
+            |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| {
                 blocking_json_task(move || {
                     let (data, execution_optimistic) = state_id
                         .map_state_and_execution_optimistic(
```
```diff
@@ -679,23 +678,7 @@ pub fn serve<T: BeaconChainTypes>(
                             |state, execution_optimistic| {
                                 let index_opt = match &validator_id {
                                     ValidatorId::PublicKey(pubkey) => {
-                                        // Fast path: use the pubkey cache which is probably
-                                        // initialised at the head.
-                                        match state.get_validator_index_read_only(pubkey) {
-                                            Ok(result) => result,
-                                            Err(e) => {
-                                                // Slow path, fall back to iteration.
-                                                debug!(
-                                                    log,
-                                                    "Validator look-up cache miss";
-                                                    "reason" => ?e,
-                                                );
-                                                state
-                                                    .validators()
-                                                    .iter()
-                                                    .position(|v| v.pubkey == *pubkey)
-                                            }
-                                        }
+                                        state.validators().iter().position(|v| v.pubkey == *pubkey)
                                     }
                                     ValidatorId::Index(index) => Some(*index as usize),
                                 };
```
```diff
@@ -447,21 +447,6 @@ impl<T: EthSpec> BeaconState<T> {
         Ok(self.pubkey_cache().get(pubkey))
     }
 
-    /// Immutable variant of `get_validator_index` which errors if the cache is not up to date.
-    pub fn get_validator_index_read_only(
-        &self,
-        pubkey: &PublicKeyBytes,
-    ) -> Result<Option<usize>, Error> {
-        let pubkey_cache = self.pubkey_cache();
-        if pubkey_cache.len() != self.validators().len() {
-            return Err(Error::PubkeyCacheIncomplete {
-                cache_len: pubkey_cache.len(),
-                registry_len: self.validators().len(),
-            });
-        }
-        Ok(pubkey_cache.get(pubkey))
-    }
-
     /// The epoch corresponding to `self.slot()`.
     pub fn current_epoch(&self) -> Epoch {
         self.slot().epoch(T::slots_per_epoch())
```