2019-02-14 01:09:18 +00:00
|
|
|
use serde_derive::Serialize;
|
2019-05-27 06:13:32 +00:00
|
|
|
use ssz_derive::{Decode, Encode};
|
Optimise tree hash caching for block production (#2106)
## Proposed Changes
`@potuz` on the Eth R&D Discord observed that Lighthouse blocks on Pyrmont were always arriving at other nodes after at least 1 second. Part of this could be due to processing and slow propagation, but metrics also revealed that the Lighthouse nodes were usually taking 400-600ms to even just produce a block before broadcasting it.
I tracked the slowness down to the lack of a pre-built tree hash cache (THC) on the states being used for block production. This was due to using the head state for block production, which lacks a THC in order to keep fork choice fast (cloning a THC takes at least 30ms for 100k validators). This PR modifies block production to clone a state from the snapshot cache rather than the head, which speeds things up by 200-400ms by avoiding the tree hash cache rebuild. In practice this seems to have cut block production time down to 300ms or less. Ideally we could _remove_ the snapshot from the cache (and save the 30ms), but it is required for when we re-process the block after signing it with the validator client.
## Alternatives
I experimented with 2 alternatives to this approach, before deciding on it:
* Alternative 1: ensure the `head` has a tree hash cache. This is too slow, as it imposes a +30ms hit on fork choice, which currently takes ~5ms (with occasional spikes).
* Alternative 2: use `Arc<BeaconSnapshot>` in the snapshot cache and share snapshots between the cache and the `head`. This made fork choice blazing fast (1ms), and block production the same as in this PR, but had a negative impact on block processing which I don't think is worth it. It ended up being necessary to clone the full state from the snapshot cache during block production, imposing the +30ms penalty there _as well_ as in block production.
In contrast, the approach in this PR should only impact block production, and it improves it! Yay for Pareto improvements :tada:
## Additional Info
This commit (ac59dfa) is currently running on all the Lighthouse Pyrmont nodes, and I've added a dashboard to the Pyrmont grafana instance with the metrics.
In future work we should optimise the attestation packing, which consumes around 30-60ms and is now a substantial contributor to the total.
2020-12-21 06:29:39 +00:00
|
|
|
use types::{beacon_state::CloneConfig, BeaconState, EthSpec, Hash256, SignedBeaconBlock};
|
2019-02-14 01:09:18 +00:00
|
|
|
|
2020-02-10 23:19:36 +00:00
|
|
|
/// Represents some block and its associated state. Generally, this will be used for tracking the
|
2019-02-14 01:09:18 +00:00
|
|
|
/// head, justified head and finalized head.
|
2019-05-27 06:13:32 +00:00
|
|
|
#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
|
2020-04-06 00:53:33 +00:00
|
|
|
pub struct BeaconSnapshot<E: EthSpec> {
|
2020-02-10 23:19:36 +00:00
|
|
|
pub beacon_block: SignedBeaconBlock<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
pub beacon_block_root: Hash256,
|
2019-05-13 04:44:43 +00:00
|
|
|
pub beacon_state: BeaconState<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
pub beacon_state_root: Hash256,
|
|
|
|
}
|
|
|
|
|
2020-04-06 00:53:33 +00:00
|
|
|
impl<E: EthSpec> BeaconSnapshot<E> {
|
2019-02-14 01:09:18 +00:00
|
|
|
/// Create a new checkpoint.
|
|
|
|
pub fn new(
|
2020-02-10 23:19:36 +00:00
|
|
|
beacon_block: SignedBeaconBlock<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
beacon_block_root: Hash256,
|
2019-05-13 04:44:43 +00:00
|
|
|
beacon_state: BeaconState<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
beacon_state_root: Hash256,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
beacon_block,
|
|
|
|
beacon_block_root,
|
|
|
|
beacon_state,
|
|
|
|
beacon_state_root,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Update all fields of the checkpoint.
|
|
|
|
pub fn update(
|
|
|
|
&mut self,
|
2020-02-10 23:19:36 +00:00
|
|
|
beacon_block: SignedBeaconBlock<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
beacon_block_root: Hash256,
|
2019-05-13 04:44:43 +00:00
|
|
|
beacon_state: BeaconState<E>,
|
2019-02-14 01:09:18 +00:00
|
|
|
beacon_state_root: Hash256,
|
|
|
|
) {
|
|
|
|
self.beacon_block = beacon_block;
|
|
|
|
self.beacon_block_root = beacon_block_root;
|
|
|
|
self.beacon_state = beacon_state;
|
|
|
|
self.beacon_state_root = beacon_state_root;
|
|
|
|
}
|
2020-02-04 01:43:04 +00:00
|
|
|
|
Optimise tree hash caching for block production (#2106)
## Proposed Changes
`@potuz` on the Eth R&D Discord observed that Lighthouse blocks on Pyrmont were always arriving at other nodes after at least 1 second. Part of this could be due to processing and slow propagation, but metrics also revealed that the Lighthouse nodes were usually taking 400-600ms to even just produce a block before broadcasting it.
I tracked the slowness down to the lack of a pre-built tree hash cache (THC) on the states being used for block production. This was due to using the head state for block production, which lacks a THC in order to keep fork choice fast (cloning a THC takes at least 30ms for 100k validators). This PR modifies block production to clone a state from the snapshot cache rather than the head, which speeds things up by 200-400ms by avoiding the tree hash cache rebuild. In practice this seems to have cut block production time down to 300ms or less. Ideally we could _remove_ the snapshot from the cache (and save the 30ms), but it is required for when we re-process the block after signing it with the validator client.
## Alternatives
I experimented with 2 alternatives to this approach, before deciding on it:
* Alternative 1: ensure the `head` has a tree hash cache. This is too slow, as it imposes a +30ms hit on fork choice, which currently takes ~5ms (with occasional spikes).
* Alternative 2: use `Arc<BeaconSnapshot>` in the snapshot cache and share snapshots between the cache and the `head`. This made fork choice blazing fast (1ms), and block production the same as in this PR, but had a negative impact on block processing which I don't think is worth it. It ended up being necessary to clone the full state from the snapshot cache during block production, imposing the +30ms penalty there _as well_ as in block production.
In contract, the approach in this PR should only impact block production, and it improves it! Yay for pareto improvements :tada:
## Additional Info
This commit (ac59dfa) is currently running on all the Lighthouse Pyrmont nodes, and I've added a dashboard to the Pyrmont grafana instance with the metrics.
In future work we should optimise the attestation packing, which consumes around 30-60ms and is now a substantial contributor to the total.
2020-12-21 06:29:39 +00:00
|
|
|
pub fn clone_with(&self, clone_config: CloneConfig) -> Self {
|
2020-02-04 01:43:04 +00:00
|
|
|
Self {
|
|
|
|
beacon_block: self.beacon_block.clone(),
|
|
|
|
beacon_block_root: self.beacon_block_root,
|
Optimise tree hash caching for block production (#2106)
## Proposed Changes
`@potuz` on the Eth R&D Discord observed that Lighthouse blocks on Pyrmont were always arriving at other nodes after at least 1 second. Part of this could be due to processing and slow propagation, but metrics also revealed that the Lighthouse nodes were usually taking 400-600ms to even just produce a block before broadcasting it.
I tracked the slowness down to the lack of a pre-built tree hash cache (THC) on the states being used for block production. This was due to using the head state for block production, which lacks a THC in order to keep fork choice fast (cloning a THC takes at least 30ms for 100k validators). This PR modifies block production to clone a state from the snapshot cache rather than the head, which speeds things up by 200-400ms by avoiding the tree hash cache rebuild. In practice this seems to have cut block production time down to 300ms or less. Ideally we could _remove_ the snapshot from the cache (and save the 30ms), but it is required for when we re-process the block after signing it with the validator client.
## Alternatives
I experimented with 2 alternatives to this approach, before deciding on it:
* Alternative 1: ensure the `head` has a tree hash cache. This is too slow, as it imposes a +30ms hit on fork choice, which currently takes ~5ms (with occasional spikes).
* Alternative 2: use `Arc<BeaconSnapshot>` in the snapshot cache and share snapshots between the cache and the `head`. This made fork choice blazing fast (1ms), and block production the same as in this PR, but had a negative impact on block processing which I don't think is worth it. It ended up being necessary to clone the full state from the snapshot cache during block production, imposing the +30ms penalty there _as well_ as in block production.
In contract, the approach in this PR should only impact block production, and it improves it! Yay for pareto improvements :tada:
## Additional Info
This commit (ac59dfa) is currently running on all the Lighthouse Pyrmont nodes, and I've added a dashboard to the Pyrmont grafana instance with the metrics.
In future work we should optimise the attestation packing, which consumes around 30-60ms and is now a substantial contributor to the total.
2020-12-21 06:29:39 +00:00
|
|
|
beacon_state: self.beacon_state.clone_with(clone_config),
|
2020-02-04 01:43:04 +00:00
|
|
|
beacon_state_root: self.beacon_state_root,
|
|
|
|
}
|
|
|
|
}
|
2019-02-14 01:09:18 +00:00
|
|
|
}
|