//! Contains the handler for the `GET validator/duties/proposer/{epoch}` endpoint.
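//!
//! The response lists the proposer for every slot of the requested epoch, together with the
//! `dependent_root` (the block root upon which the proposer shuffling depends) and an
//! `execution_optimistic` flag.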

use crate::state_id::StateId;
use beacon_chain::{
    beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch},
    BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
use eth2::types::{self as api_types};
use safe_arith::SafeArith;
use slog::{debug, Logger};
use slot_clock::SlotClock;
use std::cmp::Ordering;
use types::{CloneConfig, Epoch, EthSpec, Hash256, Slot};

/// The struct that is returned to the requesting HTTP client.
type ApiDuties = api_types::DutiesResponse<Vec<api_types::ProposerData>>;

/// Handles a request from the HTTP API for proposer duties.
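///
/// Requests are dispatched as follows:
///
/// - Current epoch (allowing for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`): served from the
///   `beacon_proposer_cache`, falling back to computing and caching duties from the head state.
/// - Next epoch: computed from the head state without consulting or priming the cache.
/// - Epochs before the current epoch: computed via the slow path that reads a state from disk.
/// - Epochs beyond the next epoch: rejected, since no proposer lookahead is possible.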
pub fn proposer_duties<T: BeaconChainTypes>(
    request_epoch: Epoch,
    chain: &BeaconChain<T>,
    log: &Logger,
) -> Result<ApiDuties, warp::reject::Rejection> {
    let current_epoch = chain
        .epoch()
        .map_err(warp_utils::reject::beacon_chain_error)?;

    // Determine what the current epoch would be if we fast-forward our system clock by
    // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
    //
    // Most of the time, `tolerant_current_epoch` will be equal to `current_epoch`. However, during
    // the first `MAXIMUM_GOSSIP_CLOCK_DISPARITY` duration of the epoch, `tolerant_current_epoch`
    // will equal `current_epoch + 1`.
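    //
    // For example, in the final moments of epoch `N` (i.e. within `MAXIMUM_GOSSIP_CLOCK_DISPARITY`
    // of the next epoch boundary), `current_epoch` is still `N` whilst `tolerant_current_epoch` is
    // already `N + 1`, so a marginally-early request for epoch `N + 1` is treated as a
    // current-epoch request and served from the cache.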
    let tolerant_current_epoch = chain
        .slot_clock
        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
        .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))?
        .epoch(T::EthSpec::slots_per_epoch());

    if request_epoch == current_epoch || request_epoch == tolerant_current_epoch {
        // If we could consider ourselves in the `request_epoch` when allowing for clock disparity
        // tolerance then serve this request from the cache.
        if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? {
            Ok(duties)
        } else {
            debug!(
                log,
                "Proposer cache miss";
                "request_epoch" => request_epoch,
            );
            compute_and_cache_proposer_duties(request_epoch, chain)
        }
    } else if request_epoch
        == current_epoch
            .safe_add(1)
            .map_err(warp_utils::reject::arith_error)?
    {
        let (proposers, dependent_root, execution_status, _fork) =
            compute_proposer_duties_from_head(request_epoch, chain)
                .map_err(warp_utils::reject::beacon_chain_error)?;
        convert_to_api_response(
            chain,
            request_epoch,
            dependent_root,
            execution_status.is_optimistic_or_invalid(),
            proposers,
        )
    } else if request_epoch
        > current_epoch
            .safe_add(1)
            .map_err(warp_utils::reject::arith_error)?
    {
        // Reject queries about future epochs for which lookahead is not possible.
        Err(warp_utils::reject::custom_bad_request(format!(
            "request epoch {} is more than one epoch ahead of the current epoch {}",
            request_epoch, current_epoch
        )))
    } else {
        // request_epoch < current_epoch
        //
        // Queries about the past are handled with a slow path.
        compute_historic_proposer_duties(request_epoch, chain)
    }
}

/// Attempt to load the proposer duties from the `chain.beacon_proposer_cache`, returning `Ok(None)`
/// if there is a cache miss.
///
/// ## Notes
///
/// The `current_epoch` value should equal the current epoch on the slot clock (with some
/// tolerance), otherwise we risk washing out the proposer cache at the expense of block processing.
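///
/// The cache is keyed by the proposer-shuffling decision root. When the request is for the head's
/// epoch this is the head state's decision root; when the request is for a later epoch the head
/// block root itself is the decision root (see the `match` on `head_epoch.cmp(&request_epoch)`
/// below).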
fn try_proposer_duties_from_cache<T: BeaconChainTypes>(
    request_epoch: Epoch,
    chain: &BeaconChain<T>,
) -> Result<Option<ApiDuties>, warp::reject::Rejection> {
    let head = chain.canonical_head.cached_head();
    let head_block = &head.snapshot.beacon_block;
    let head_block_root = head.head_block_root();
    let head_decision_root = head
        .snapshot
        .beacon_state
        .proposer_shuffling_decision_root(head_block_root)
        .map_err(warp_utils::reject::beacon_state_error)?;
    let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch());
    let execution_optimistic = chain
        .is_optimistic_or_invalid_head_block(head_block)
        .map_err(warp_utils::reject::beacon_chain_error)?;

    let dependent_root = match head_epoch.cmp(&request_epoch) {
        // head_epoch == request_epoch
        Ordering::Equal => head_decision_root,
        // head_epoch < request_epoch
        Ordering::Less => head_block_root,
        // head_epoch > request_epoch
        Ordering::Greater => {
            return Err(warp_utils::reject::custom_server_error(format!(
                "head epoch {} is later than request epoch {}",
                head_epoch, request_epoch
            )))
        }
    };

    chain
        .beacon_proposer_cache
        .lock()
        .get_epoch::<T::EthSpec>(dependent_root, request_epoch)
        .cloned()
        .map(|indices| {
            convert_to_api_response(
                chain,
                request_epoch,
                dependent_root,
                execution_optimistic,
                indices.to_vec(),
            )
        })
        .transpose()
}

/// Compute the proposer duties using the head state, add the duties to the proposer cache and
/// return the proposers.
///
/// This method does *not* attempt to read the values from the cache before computing them. See
/// `try_proposer_duties_from_cache` to read values.
///
/// ## Notes
///
/// The `current_epoch` value should equal the current epoch on the slot clock, otherwise we risk
/// washing out the proposer cache at the expense of block processing.
fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
    current_epoch: Epoch,
    chain: &BeaconChain<T>,
) -> Result<ApiDuties, warp::reject::Rejection> {
    let (indices, dependent_root, execution_status, fork) =
        compute_proposer_duties_from_head(current_epoch, chain)
            .map_err(warp_utils::reject::beacon_chain_error)?;

    // Prime the proposer shuffling cache with the newly-learned value.
    chain
        .beacon_proposer_cache
        .lock()
        .insert(current_epoch, dependent_root, indices.clone(), fork)
        .map_err(BeaconChainError::from)
        .map_err(warp_utils::reject::beacon_chain_error)?;

    convert_to_api_response(
        chain,
        current_epoch,
        dependent_root,
        execution_status.is_optimistic_or_invalid(),
        indices,
    )
}

/// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the
/// `beacon_proposer_cache`.
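///
/// If the head state is not ahead of the requested epoch it is cloned and advanced into that
/// epoch via `ensure_state_is_in_epoch`; otherwise a state for the epoch's start slot is loaded
/// via `StateId`.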
fn compute_historic_proposer_duties<T: BeaconChainTypes>(
    epoch: Epoch,
    chain: &BeaconChain<T>,
) -> Result<ApiDuties, warp::reject::Rejection> {
    // If the head is quite old then it might still be relevant for a historical request.
    //
    // Avoid holding the `cached_head` longer than necessary.
    let state_opt = {
        let (cached_head, execution_status) = chain
            .canonical_head
            .head_and_execution_status()
            .map_err(warp_utils::reject::beacon_chain_error)?;
        let head = &cached_head.snapshot;

        if head.beacon_state.current_epoch() <= epoch {
            Some((
                head.beacon_state_root(),
                head.beacon_state
                    .clone_with(CloneConfig::committee_caches_only()),
                execution_status.is_optimistic_or_invalid(),
            ))
        } else {
            None
        }
    };

    let (state, execution_optimistic) =
        if let Some((state_root, mut state, execution_optimistic)) = state_opt {
            // If we've loaded the head state it might be from a previous epoch, ensure it's in a
            // suitable epoch.
            ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec)
                .map_err(warp_utils::reject::beacon_chain_error)?;
            (state, execution_optimistic)
        } else {
            StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?
        };

    // Ensure the state lookup was correct.
    if state.current_epoch() != epoch {
        return Err(warp_utils::reject::custom_server_error(format!(
            "state epoch {} not equal to request epoch {}",
            state.current_epoch(),
            epoch
        )));
    }

    let indices = state
        .get_beacon_proposer_indices(&chain.spec)
        .map_err(BeaconChainError::from)
        .map_err(warp_utils::reject::beacon_chain_error)?;

    // We can supply the genesis block root as the block root since we know that the only block
    // that decides its own root is the genesis block.
    let dependent_root = state
        .proposer_shuffling_decision_root(chain.genesis_block_root)
        .map_err(BeaconChainError::from)
        .map_err(warp_utils::reject::beacon_chain_error)?;

    convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices)
}

/// Converts the internal representation of proposer duties into one that is compatible with the
/// standard API.
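///
/// Entry `i` of `indices` is treated as the proposer for slot `start_slot(epoch) + i`; the test
/// sketch at the bottom of this file illustrates the mapping.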
fn convert_to_api_response<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    epoch: Epoch,
    dependent_root: Hash256,
    execution_optimistic: bool,
    indices: Vec<usize>,
) -> Result<ApiDuties, warp::reject::Rejection> {
    let index_to_pubkey_map = chain
        .validator_pubkey_bytes_many(&indices)
        .map_err(warp_utils::reject::beacon_chain_error)?;

    // Map our internal data structure into the API structure.
    let proposer_data = indices
        .iter()
        .enumerate()
        .filter_map(|(i, &validator_index)| {
            // Offset the index in `indices` to determine the slot for which these
            // duties apply.
            let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()) + Slot::from(i);

            Some(api_types::ProposerData {
                pubkey: *index_to_pubkey_map.get(&validator_index)?,
                validator_index: validator_index as u64,
                slot,
            })
        })
        .collect::<Vec<_>>();

    // Consistency check.
    let slots_per_epoch = T::EthSpec::slots_per_epoch() as usize;
    if proposer_data.len() != slots_per_epoch {
        Err(warp_utils::reject::custom_server_error(format!(
            "{} proposers is not enough for {} slots",
            proposer_data.len(),
            slots_per_epoch,
        )))
    } else {
        Ok(api_types::DutiesResponse {
            dependent_root,
            execution_optimistic: Some(execution_optimistic),
            data: proposer_data,
        })
    }
}
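
// A minimal sketch illustrating the duty-index-to-slot mapping that `convert_to_api_response`
// relies on: entry `i` of `indices` proposes at `start_slot(epoch) + i`. It assumes the
// `MainnetEthSpec` preset from the `types` crate, which this file already depends on.
#[cfg(test)]
mod duty_slot_mapping_sketch {
    use types::{Epoch, EthSpec, MainnetEthSpec, Slot};

    #[test]
    fn duty_index_maps_to_consecutive_slots_within_the_epoch() {
        let slots_per_epoch = MainnetEthSpec::slots_per_epoch();
        let epoch = Epoch::new(3);

        // The first entry corresponds to the first slot of the epoch.
        assert_eq!(
            epoch.start_slot(slots_per_epoch) + Slot::from(0_u64),
            Slot::new(3 * slots_per_epoch)
        );

        // The last entry still falls within the same epoch.
        let last_slot = epoch.start_slot(slots_per_epoch) + Slot::from(slots_per_epoch - 1);
        assert_eq!(last_slot.epoch(slots_per_epoch), epoch);
    }
}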