use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta};
use operation_pool::{AttMaxCover, MaxCover, RewardCache, SplitAttestation};
use state_processing::{
    common::get_attesting_indices_from_state,
    per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards,
};
use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, EthSpec, Hash256};

impl<T: BeaconChainTypes> BeaconChain<T> {
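    /// Compute the reward earned by the proposer of `block`, split into its attestation
    /// and sync committee components.
    ///
    /// `state` must be the block's pre-state, advanced to the same slot as the block;
    /// otherwise `BlockRewardSlotError` is returned. When `include_attestations` is set,
    /// the `AttestationData` of each attestation in the block is returned alongside the
    /// per-attestation rewards.
    ///
    /// A hypothetical call site, as a sketch only (the `signed_block`, `block_root`,
    /// `pre_state` and `reward_cache` bindings are assumed, not part of this function):
    ///
    /// ```ignore
    /// let reward = chain.compute_block_reward(
    ///     signed_block.message(),
    ///     block_root,
    ///     &pre_state,
    ///     &mut reward_cache,
    ///     false,
    /// )?;
    /// println!("proposer {} earned {} Gwei", reward.meta.proposer_index, reward.total);
    /// ```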
    pub fn compute_block_reward<Payload: AbstractExecPayload<T::EthSpec>>(
        &self,
        block: BeaconBlockRef<'_, T::EthSpec, Payload>,
        block_root: Hash256,
        state: &BeaconState<T::EthSpec>,
        reward_cache: &mut RewardCache,
        include_attestations: bool,
    ) -> Result<BlockReward, BeaconChainError> {
        if block.slot() != state.slot() {
            return Err(BeaconChainError::BlockRewardSlotError);
        }
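
        // Prime the reward cache so that attestation rewards can be computed from a
        // compact participation bitfield rather than by re-examining each validator's
        // `ParticipationFlags`.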
        reward_cache.update(state)?;

        let total_active_balance = state.get_total_active_balance()?;
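
        // Compute the attesting indices for every attestation in the block up front, so
        // the reward calculation below never has to recompute them.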
        let split_attestations = block
            .body()
            .attestations()
            .iter()
            .map(|att| {
                let attesting_indices = get_attesting_indices_from_state(state, att)?;
                Ok(SplitAttestation::new(att.clone(), attesting_indices))
            })
            .collect::<Result<Vec<_>, BeaconChainError>>()?;
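
        // Treat each attestation as a max-cover candidate whose weights are the proposer
        // rewards for the validators it newly covers.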
        let mut per_attestation_rewards = split_attestations
            .iter()
            .map(|att| {
                AttMaxCover::new(
                    att.as_ref(),
                    state,
                    reward_cache,
                    total_active_balance,
                    &self.spec,
                )
                .ok_or(BeaconChainError::BlockRewardAttestationError)
            })
            .collect::<Result<Vec<_>, _>>()?;

        // Update each attestation's rewards to account for the attestations included
        // before it. This is O(n^2) in the number of attestations n.
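        // Each attestation's covering set excludes validators already covered by an
        // earlier attestation, so no validator's reward is counted twice.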
        for i in 0..per_attestation_rewards.len() {
            let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1);
            let latest_att = &updated[i];

            for att in to_update {
                att.update_covering_set(latest_att.intermediate(), latest_att.covering_set());
            }
        }
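
        // Tally the per-validator rewards, split by whether the attestation's slot falls
        // in the state's current epoch or the previous one.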
        let mut prev_epoch_total = 0;
        let mut curr_epoch_total = 0;

        for cover in &per_attestation_rewards {
            for &reward in cover.fresh_validators_rewards.values() {
                if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch()
                {
                    curr_epoch_total += reward;
                } else {
                    prev_epoch_total += reward;
                }
            }
        }

        let attestation_total = prev_epoch_total + curr_epoch_total;

        // Drop the covers.
        let per_attestation_rewards = per_attestation_rewards
            .into_iter()
            .map(|cover| cover.fresh_validators_rewards)
            .collect();

        // Add the attestation data if desired.
        let attestations = if include_attestations {
            block
                .body()
                .attestations()
                .iter()
                .map(|a| a.data.clone())
                .collect()
        } else {
            vec![]
        };

        let attestation_rewards = AttestationRewards {
            total: attestation_total,
            prev_epoch_total,
            curr_epoch_total,
            per_attestation_rewards,
            attestations,
        };

        // Sync committee rewards.
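        // The proposer receives a fixed reward per set bit in the sync aggregate.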
        let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() {
            let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec)
                .map_err(|_| BeaconChainError::BlockRewardSyncError)?;
            sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit
        } else {
            0
        };

        // Total, metadata
        let total = attestation_total + sync_committee_rewards;
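
        // The parent slot comes from the pre-state's latest block header, since `block`
        // itself has not yet been applied to `state`.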
        let meta = BlockRewardMeta {
            slot: block.slot(),
            parent_slot: state.latest_block_header().slot,
            proposer_index: block.proposer_index(),
            graffiti: block.body().graffiti().as_utf8_lossy(),
        };

        Ok(BlockReward {
            total,
            block_root,
            meta,
            attestation_rewards,
            sync_committee_rewards,
        })
    }
}