Allow proposer duties request for the next epoch (#2963)

## Issue Addressed

Closes #2880 

## Proposed Changes

Support requests for the next epoch in the `proposer_duties` API.

## Additional Info

Implemented by skipping the proposer cache for this case: a cache entry for the next epoch would be missed at every new slot because its `dependent_root` changes as the head advances, and we don't want to "wash out" the cache by storing values that go stale so quickly.
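As a rough usage illustration (not part of this commit), here is a minimal sketch of querying next-epoch duties with the `eth2` crate's `BeaconNodeHttpClient`, the same client the HTTP API tests below use. The node URL, timeout, epoch value, and the `tokio` runtime are placeholder assumptions:

```rust
use eth2::{types::Epoch, BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), String> {
    // Placeholder endpoint; point this at a running beacon node.
    let url = SensitiveUrl::parse("http://localhost:5052").map_err(|e| format!("{:?}", e))?;
    let client = BeaconNodeHttpClient::new(url, Timeouts::set_all(Duration::from_secs(12)));

    // Hypothetical current epoch; in practice read it from the node's head.
    let current_epoch = Epoch::new(100);
    let next_epoch = current_epoch + 1;

    // With this change a request for `current_epoch + 1` succeeds; anything further
    // ahead is still rejected with a 400 error.
    let duties = client
        .get_validator_duties_proposer(next_epoch)
        .await
        .map_err(|e| format!("{:?}", e))?;

    // The dependent_root for the next epoch follows the chain head, so it can change
    // at every new slot until the current epoch ends. This is why these duties are
    // computed on the fly instead of being written into the proposer cache.
    println!("dependent_root: {:?}", duties.dependent_root);
    println!("{} proposer duties returned", duties.data.len());
    Ok(())
}
```

This mirrors the new branch in the handler below: next-epoch requests call `compute_proposer_duties` directly and never touch the cache.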
eklm · 2022-02-18 05:32:00 +00:00
commit 56b2ec6b29 (parent c8019caba6)
4 changed files with 71 additions and 31 deletions

Cargo.lock (generated)

@@ -2442,6 +2442,7 @@ dependencies = [
  "lighthouse_network",
  "lighthouse_version",
  "network",
+ "safe_arith",
  "sensitive_url",
  "serde",
  "slog",

beacon_node/http_api/Cargo.toml

@@ -27,6 +27,7 @@ slot_clock = { path = "../../common/slot_clock" }
 eth2_ssz = "0.4.1"
 bs58 = "0.4.0"
 futures = "0.3.8"
+safe_arith = {path = "../../consensus/safe_arith"}

 [dev-dependencies]
 store = { path = "../store" }

beacon_node/http_api/src/proposer_duties.rs

@@ -5,11 +5,12 @@ use beacon_chain::{
     BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
 };
 use eth2::types::{self as api_types};
+use safe_arith::SafeArith;
 use slog::{debug, Logger};
 use slot_clock::SlotClock;
 use state_processing::state_advance::partial_state_advance;
 use std::cmp::Ordering;
-use types::{BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, Slot};
+use types::{BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot};

 /// The struct that is returned to the requesting HTTP client.
 type ApiDuties = api_types::DutiesResponse<Vec<api_types::ProposerData>>;
@@ -49,11 +50,21 @@ pub fn proposer_duties<T: BeaconChainTypes>(
             );
             compute_and_cache_proposer_duties(request_epoch, chain)
         }
-    } else if request_epoch > current_epoch {
-        // Reject queries about the future as they're very expensive there's no look-ahead for
-        // proposer duties.
+    } else if request_epoch
+        == current_epoch
+            .safe_add(1)
+            .map_err(warp_utils::reject::arith_error)?
+    {
+        let (proposers, dependent_root, _) = compute_proposer_duties(request_epoch, chain)?;
+        convert_to_api_response(chain, request_epoch, dependent_root, proposers)
+    } else if request_epoch
+        > current_epoch
+            .safe_add(1)
+            .map_err(warp_utils::reject::arith_error)?
+    {
+        // Reject queries about the future epochs for which lookahead is not possible
         Err(warp_utils::reject::custom_bad_request(format!(
-            "request epoch {} is ahead of the current epoch {}",
+            "request epoch {} is ahead of the next epoch {}",
             request_epoch, current_epoch
         )))
     } else {
@@ -119,6 +130,24 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
     current_epoch: Epoch,
     chain: &BeaconChain<T>,
 ) -> Result<ApiDuties, warp::reject::Rejection> {
+    let (indices, dependent_root, fork) = compute_proposer_duties(current_epoch, chain)?;
+
+    // Prime the proposer shuffling cache with the newly-learned value.
+    chain
+        .beacon_proposer_cache
+        .lock()
+        .insert(current_epoch, dependent_root, indices.clone(), fork)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    convert_to_api_response(chain, current_epoch, dependent_root, indices)
+}
+
+/// Compute the proposer duties using the head state without cache.
+fn compute_proposer_duties<T: BeaconChainTypes>(
+    current_epoch: Epoch,
+    chain: &BeaconChain<T>,
+) -> Result<(Vec<usize>, Hash256, Fork), warp::reject::Rejection> {
     // Take a copy of the head of the chain.
     let head = chain
         .head()
@@ -140,20 +169,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
         .map_err(BeaconChainError::from)
         .map_err(warp_utils::reject::beacon_chain_error)?;

-    // Prime the proposer shuffling cache with the newly-learned value.
-    chain
-        .beacon_proposer_cache
-        .lock()
-        .insert(
-            state.current_epoch(),
-            dependent_root,
-            indices.clone(),
-            state.fork(),
-        )
-        .map_err(BeaconChainError::from)
-        .map_err(warp_utils::reject::beacon_chain_error)?;
-
-    convert_to_api_response(chain, current_epoch, dependent_root, indices)
+    Ok((indices, dependent_root, state.fork()))
 }

 /// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the

beacon_node/http_api/tests/tests.rs

@@ -1671,7 +1671,7 @@ impl ApiTester {
     pub async fn test_get_validator_duties_proposer(self) -> Self {
         let current_epoch = self.chain.epoch().unwrap();

-        for epoch in 0..=self.chain.epoch().unwrap().as_u64() {
+        for epoch in 0..=self.chain.epoch().unwrap().as_u64() + 1 {
             let epoch = Epoch::from(epoch);

             let dependent_root = self
@@ -1780,9 +1780,9 @@ impl ApiTester {
             }
         }

-        // Requests to future epochs should fail.
+        // Requests to the epochs after the next epoch should fail.
         self.client
-            .get_validator_duties_proposer(current_epoch + 1)
+            .get_validator_duties_proposer(current_epoch + 2)
             .await
             .unwrap_err();
@@ -1802,15 +1802,27 @@ impl ApiTester {
             current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1),
         );

-        assert_eq!(
-            self.client
-                .get_validator_duties_proposer(current_epoch)
-                .await
-                .unwrap_err()
-                .status()
-                .map(Into::into),
-            Some(400),
-            "should not get proposer duties outside of tolerance"
+        let dependent_root = self
+            .chain
+            .block_root_at_slot(
+                current_epoch.start_slot(E::slots_per_epoch()) - 1,
+                WhenSlotSkipped::Prev,
+            )
+            .unwrap()
+            .unwrap_or(self.chain.head_beacon_block_root().unwrap());
+
+        self.client
+            .get_validator_duties_proposer(current_epoch)
+            .await
+            .expect("should get proposer duties for the next epoch outside of tolerance");
+
+        assert!(
+            self.chain
+                .beacon_proposer_cache
+                .lock()
+                .get_epoch::<E>(dependent_root, current_epoch)
+                .is_none(),
+            "should not prime the proposer cache outside of tolerance"
         );

         assert_eq!(
@@ -1832,6 +1844,16 @@ impl ApiTester {
             .get_validator_duties_proposer(current_epoch)
             .await
             .expect("should get proposer duties within tolerance");
+
+        assert!(
+            self.chain
+                .beacon_proposer_cache
+                .lock()
+                .get_epoch::<E>(dependent_root, current_epoch)
+                .is_some(),
+            "should prime the proposer cache inside the tolerance"
+        );
+
         self.client
             .post_validator_duties_attester(next_epoch, &[0])
             .await