mod attestation;
mod attestation_id;
mod attester_slashing;
mod max_cover;
mod metrics;
mod persistence;
mod sync_aggregate_id;

pub use persistence::{
    PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase,
};

use crate::sync_aggregate_id::SyncAggregateId;
use attestation::AttMaxCover;
use attestation_id::AttestationId;
use attester_slashing::AttesterSlashingMaxCover;
use max_cover::{maximum_cover, MaxCover};
use parking_lot::RwLock;
use state_processing::per_block_processing::errors::AttestationValidationError;
use state_processing::per_block_processing::{
    get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_exit,
    VerifySignatures,
};
use state_processing::SigVerifiedOp;
use std::collections::{hash_map::Entry, HashMap, HashSet};
use std::marker::PhantomData;
use std::ptr;
use types::{
    sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttesterSlashing,
    BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, Hash256,
    ProposerSlashing, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution,
    Validator,
};

type SyncContributions<T> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeContribution<T>>>>;

#[derive(Default, Debug)]
pub struct OperationPool<T: EthSpec + Default> {
    /// Map from attestation ID (see below) to vectors of attestations.
    attestations: RwLock<HashMap<AttestationId, Vec<Attestation<T>>>>,
    /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID.
    sync_contributions: SyncContributions<T>,
    /// Set of attester slashings, and the fork version they were verified against.
    attester_slashings: RwLock<HashSet<(AttesterSlashing<T>, ForkVersion)>>,
    /// Map from proposer index to slashing.
    proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>,
    /// Map from exiting validator to their exit data.
    voluntary_exits: RwLock<HashMap<u64, SignedVoluntaryExit>>,
    _phantom: PhantomData<T>,
}

#[derive(Debug, PartialEq)]
pub enum OpPoolError {
    GetAttestationsTotalBalanceError(BeaconStateError),
    GetBlockRootError(BeaconStateError),
    SyncAggregateError(SyncAggregateError),
    IncorrectOpPoolVariant,
}

impl From<SyncAggregateError> for OpPoolError {
    fn from(e: SyncAggregateError) -> Self {
        OpPoolError::SyncAggregateError(e)
    }
}

impl<T: EthSpec> OperationPool<T> {
    /// Create a new operation pool.
    pub fn new() -> Self {
        Self::default()
    }

    /// Insert a sync contribution into the pool. We don't aggregate these contributions until they
    /// are retrieved from the pool.
    ///
    /// ## Note
    ///
    /// This function assumes the given `contribution` is valid.
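    ///
    /// ## Example (illustrative sketch, not a doctest)
    ///
    /// The snippet assumes a `pool: OperationPool<E>` for some `EthSpec` type `E` and an
    /// already-verified `contribution: SyncCommitteeContribution<E>`; it only exercises the
    /// public API defined in this file.
    ///
    /// ```ignore
    /// // Contributions are stored per (slot, beacon_block_root) and only aggregated on read.
    /// pool.insert_sync_contribution(contribution)?;
    /// assert!(pool.num_sync_contributions() >= 1);
    /// ```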
    pub fn insert_sync_contribution(
        &self,
        contribution: SyncCommitteeContribution<T>,
    ) -> Result<(), OpPoolError> {
        let aggregate_id = SyncAggregateId::new(contribution.slot, contribution.beacon_block_root);
        let mut contributions = self.sync_contributions.write();

        match contributions.entry(aggregate_id) {
            Entry::Vacant(entry) => {
                // If no contributions exist for the key, insert the given contribution.
                entry.insert(vec![contribution]);
            }
            Entry::Occupied(mut entry) => {
                // If contributions exist for this key, check whether there exists a contribution
                // with a matching `subcommittee_index`. If one exists, check whether the new or
                // old contribution has more aggregation bits set. If the new one does, add it to the
                // pool in place of the old one.
                let existing_contributions = entry.get_mut();
                match existing_contributions
                    .iter_mut()
                    .find(|existing_contribution| {
                        existing_contribution.subcommittee_index == contribution.subcommittee_index
                    }) {
                    Some(existing_contribution) => {
                        // Only need to replace the contribution if the new contribution has more
                        // bits set.
                        if existing_contribution.aggregation_bits.num_set_bits()
                            < contribution.aggregation_bits.num_set_bits()
                        {
                            *existing_contribution = contribution;
                        }
                    }
                    None => {
                        // If there has been no previous sync contribution for this subcommittee index,
                        // add it to the pool.
                        existing_contributions.push(contribution);
                    }
                }
            }
        };
        Ok(())
    }

    /// Calculate the `SyncAggregate` from the sync contributions that exist in the pool for the
    /// slot previous to the slot associated with `state`. Return the calculated `SyncAggregate` if
    /// contributions exist at this slot, or else `None`.
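    ///
    /// ## Example (illustrative sketch, not a doctest)
    ///
    /// Assuming a `pool` that has received contributions for the block at `state.slot() - 1`;
    /// `block_body` is a hypothetical destination for the aggregate:
    ///
    /// ```ignore
    /// // `None` means no contributions were found for the previous slot.
    /// if let Some(aggregate) = pool.get_sync_aggregate(&state)? {
    ///     block_body.sync_aggregate = aggregate;
    /// }
    /// ```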
    pub fn get_sync_aggregate(
        &self,
        state: &BeaconState<T>,
    ) -> Result<Option<SyncAggregate<T>>, OpPoolError> {
        // Sync aggregates are formed from the contributions from the previous slot.
        let slot = state.slot().saturating_sub(1u64);
        let block_root = *state
            .get_block_root(slot)
            .map_err(OpPoolError::GetBlockRootError)?;
        let id = SyncAggregateId::new(slot, block_root);
        self.sync_contributions
            .read()
            .get(&id)
            .map(|contributions| SyncAggregate::from_contributions(contributions))
            .transpose()
            .map_err(|e| e.into())
    }

    /// Total number of sync contributions in the pool.
    pub fn num_sync_contributions(&self) -> usize {
        self.sync_contributions
            .read()
            .values()
            .map(|contributions| contributions.len())
            .sum()
    }

    /// Remove sync contributions which are too old to be included in a block.
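    ///
    /// ## Retention rule (illustrative sketch)
    ///
    /// Contributions are kept while `current_slot <= contribution.slot + 1`, i.e. only the
    /// current and previous slot survive a prune. For example:
    ///
    /// ```ignore
    /// // With contributions at slots 10 and 11 in the pool:
    /// pool.prune_sync_contributions(Slot::new(12));
    /// // Slot 11 is retained (12 <= 11 + 1); slot 10 is dropped (12 > 10 + 1).
    /// ```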
    pub fn prune_sync_contributions(&self, current_slot: Slot) {
        // Prune sync contributions that are from before the previous slot.
        self.sync_contributions.write().retain(|_, contributions| {
            // All the contributions in this bucket have the same data, so we only need to
            // check the first one.
            contributions.first().map_or(false, |contribution| {
                current_slot <= contribution.slot.saturating_add(Slot::new(1))
            })
        });
    }

    /// Insert an attestation into the pool, aggregating it with existing attestations if possible.
    ///
    /// ## Note
    ///
    /// This function assumes the given `attestation` is valid.
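    ///
    /// ## Example (illustrative sketch, not a doctest)
    ///
    /// Assuming an already-verified `attestation`, plus the `fork`, `genesis_validators_root`
    /// and `spec: &ChainSpec` of the chain it belongs to:
    ///
    /// ```ignore
    /// // Attestations with identical data and disjoint signers are aggregated on insert.
    /// pool.insert_attestation(attestation, &state.fork(), state.genesis_validators_root(), spec)?;
    /// ```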
    pub fn insert_attestation(
        &self,
        attestation: Attestation<T>,
        fork: &Fork,
        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> Result<(), AttestationValidationError> {
        let id = AttestationId::from_data(&attestation.data, fork, genesis_validators_root, spec);

        // Take a write lock on the attestations map.
        let mut attestations = self.attestations.write();

        let existing_attestations = match attestations.entry(id) {
            Entry::Vacant(entry) => {
                entry.insert(vec![attestation]);
                return Ok(());
            }
            Entry::Occupied(entry) => entry.into_mut(),
        };

        let mut aggregated = false;
        for existing_attestation in existing_attestations.iter_mut() {
            if existing_attestation.signers_disjoint_from(&attestation) {
                existing_attestation.aggregate(&attestation);
                aggregated = true;
            } else if *existing_attestation == attestation {
                aggregated = true;
            }
        }

        if !aggregated {
            existing_attestations.push(attestation);
        }

        Ok(())
    }

    /// Total number of attestations in the pool, including attestations for the same data.
    pub fn num_attestations(&self) -> usize {
        self.attestations.read().values().map(Vec::len).sum()
    }

    /// Return all valid attestations for the given epoch, for use in max cover.
    fn get_valid_attestations_for_epoch<'a>(
        &'a self,
        epoch: Epoch,
        all_attestations: &'a HashMap<AttestationId, Vec<Attestation<T>>>,
        state: &'a BeaconState<T>,
        total_active_balance: u64,
        validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send,
        spec: &'a ChainSpec,
    ) -> impl Iterator<Item = AttMaxCover<'a, T>> + Send {
        let domain_bytes = AttestationId::compute_domain_bytes(
            epoch,
            &state.fork(),
            state.genesis_validators_root(),
            spec,
        );
        all_attestations
            .iter()
            .filter(move |(key, _)| key.domain_bytes_match(&domain_bytes))
            .flat_map(|(_, attestations)| attestations)
            .filter(move |attestation| attestation.data.target.epoch == epoch)
            .filter(move |attestation| {
                // Ensure attestations are valid for block inclusion
                verify_attestation_for_block_inclusion(
                    state,
                    attestation,
                    VerifySignatures::False,
                    spec,
                )
                .is_ok()
            })
            .filter(validity_filter)
            .filter_map(move |att| AttMaxCover::new(att, state, total_active_balance, spec))
    }

    /// Get a list of attestations for inclusion in a block.
    ///
    /// The `validity_filter` is a closure that provides extra filtering of the attestations
    /// before an approximately optimal bundle is constructed. We use it to provide access
    /// to the fork choice data from the `BeaconChain` struct that doesn't logically belong
    /// in the operation pool.
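    ///
    /// ## Example (illustrative sketch, not a doctest)
    ///
    /// The two closures below accept everything; in practice the caller would filter on fork
    /// choice data. `all_attestations_are_fine` and the `EthSpec` type `E` are hypothetical
    /// names for illustration only.
    ///
    /// ```ignore
    /// let all_attestations_are_fine = |_: &&Attestation<E>| true;
    /// let attestations = pool.get_attestations(
    ///     &state,
    ///     all_attestations_are_fine,
    ///     all_attestations_are_fine,
    ///     spec,
    /// )?;
    /// assert!(attestations.len() <= E::MaxAttestations::to_usize());
    /// ```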
    pub fn get_attestations(
        &self,
        state: &BeaconState<T>,
        prev_epoch_validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send,
        curr_epoch_validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send,
        spec: &ChainSpec,
    ) -> Result<Vec<Attestation<T>>, OpPoolError> {
        // Attestations for the current fork, which may be from the current or previous epoch.
        let prev_epoch = state.previous_epoch();
        let current_epoch = state.current_epoch();
        let all_attestations = self.attestations.read();
        let total_active_balance = state
            .get_total_active_balance()
            .map_err(OpPoolError::GetAttestationsTotalBalanceError)?;

        // Split attestations for the previous & current epochs, so that we
        // can optimise them individually in parallel.
        let prev_epoch_att = self.get_valid_attestations_for_epoch(
            prev_epoch,
            &*all_attestations,
            state,
            total_active_balance,
            prev_epoch_validity_filter,
            spec,
        );
        let curr_epoch_att = self.get_valid_attestations_for_epoch(
            current_epoch,
            &*all_attestations,
            state,
            total_active_balance,
            curr_epoch_validity_filter,
            spec,
        );

        let prev_epoch_limit = if let BeaconState::Base(base_state) = state {
            std::cmp::min(
                T::MaxPendingAttestations::to_usize()
                    .saturating_sub(base_state.previous_epoch_attestations.len()),
                T::MaxAttestations::to_usize(),
            )
        } else {
            T::MaxAttestations::to_usize()
        };

        let (prev_cover, curr_cover) = rayon::join(
            move || {
                let _timer = metrics::start_timer(&metrics::ATTESTATION_PREV_EPOCH_PACKING_TIME);
                // If we're in the genesis epoch, just use the current epoch attestations.
                if prev_epoch == current_epoch {
                    vec![]
                } else {
                    maximum_cover(prev_epoch_att, prev_epoch_limit)
                }
            },
            move || {
                let _timer = metrics::start_timer(&metrics::ATTESTATION_CURR_EPOCH_PACKING_TIME);
                maximum_cover(curr_epoch_att, T::MaxAttestations::to_usize())
            },
        );

        Ok(max_cover::merge_solutions(
            curr_cover,
            prev_cover,
            T::MaxAttestations::to_usize(),
        ))
    }

    /// Remove attestations which are too old to be included in a block.
    pub fn prune_attestations(&self, current_epoch: Epoch) {
        // Prune attestations that are from before the previous epoch.
        self.attestations.write().retain(|_, attestations| {
            // All the attestations in this bucket have the same data, so we only need to
            // check the first one.
            attestations
                .first()
                .map_or(false, |att| current_epoch <= att.data.target.epoch + 1)
        });
    }

    /// Insert a proposer slashing into the pool.
    pub fn insert_proposer_slashing(
        &self,
        verified_proposer_slashing: SigVerifiedOp<ProposerSlashing>,
    ) {
        let slashing = verified_proposer_slashing.into_inner();
        self.proposer_slashings
            .write()
            .insert(slashing.signed_header_1.message.proposer_index, slashing);
    }

    /// Insert an attester slashing into the pool.
    pub fn insert_attester_slashing(
        &self,
        verified_slashing: SigVerifiedOp<AttesterSlashing<T>>,
        fork: Fork,
    ) {
        self.attester_slashings
            .write()
            .insert((verified_slashing.into_inner(), fork.current_version));
    }

    /// Get proposer and attester slashings for inclusion in a block.
    ///
    /// This function computes both types of slashings together, because
    /// attester slashings may be invalidated by proposer slashings included
    /// earlier in the block.
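    ///
    /// ## Example (illustrative sketch, not a doctest)
    ///
    /// Assuming `pool`, `state` and an `EthSpec` type `E` as in the examples above:
    ///
    /// ```ignore
    /// // Proposer slashings are selected first; attester slashings that only slash validators
    /// // already covered by them are then deprioritised by the max-cover selection.
    /// let (proposer_slashings, attester_slashings) = pool.get_slashings(&state);
    /// assert!(proposer_slashings.len() <= E::MaxProposerSlashings::to_usize());
    /// assert!(attester_slashings.len() <= E::MaxAttesterSlashings::to_usize());
    /// ```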
    pub fn get_slashings(
        &self,
        state: &BeaconState<T>,
    ) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing<T>>) {
        let proposer_slashings = filter_limit_operations(
            self.proposer_slashings.read().values(),
            |slashing| {
                state
                    .validators()
                    .get(slashing.signed_header_1.message.proposer_index as usize)
                    .map_or(false, |validator| !validator.slashed)
            },
            T::MaxProposerSlashings::to_usize(),
        );

        // Set of validators to be slashed, so we don't attempt to construct invalid attester
        // slashings.
        let to_be_slashed = proposer_slashings
            .iter()
            .map(|s| s.signed_header_1.message.proposer_index)
            .collect::<HashSet<_>>();

        let reader = self.attester_slashings.read();

        let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| {
            if *fork == state.fork().previous_version || *fork == state.fork().current_version {
                AttesterSlashingMaxCover::new(slashing, &to_be_slashed, state)
            } else {
                None
            }
        });

        let attester_slashings = maximum_cover(
            relevant_attester_slashings,
            T::MaxAttesterSlashings::to_usize(),
        )
        .into_iter()
        .map(|cover| cover.object().clone())
        .collect();

        (proposer_slashings, attester_slashings)
    }

    /// Prune proposer slashings for validators which are exited in the finalized epoch.
    pub fn prune_proposer_slashings(&self, head_state: &BeaconState<T>) {
        prune_validator_hash_map(
            &mut self.proposer_slashings.write(),
            |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
            head_state,
        );
    }

    /// Prune attester slashings for all slashed or withdrawn validators, and for slashings
    /// verified against another fork.
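    ///
    /// ## Retention rule (summary of the logic below)
    ///
    /// A slashing is kept only if its fork version matches the head's current fork version
    /// (or the previous version while that fork is not yet finalized) and it still slashes at
    /// least one validator that has not exited before the finalized epoch.
    ///
    /// ```ignore
    /// // Illustrative call site, assuming a `pool` and the current `head_state`:
    /// pool.prune_attester_slashings(&head_state);
    /// pool.prune_proposer_slashings(&head_state);
    /// ```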
pub fn prune_attester_slashings(&self, head_state: &BeaconState<T>) {
|
2020-06-18 11:06:34 +00:00
|
|
|
self.attester_slashings
|
|
|
|
.write()
|
|
|
|
.retain(|(slashing, fork_version)| {
|
Address queue congestion in migrator (#1923)
## Issue Addressed
*Should* address #1917
## Proposed Changes
Stops the `BackgroupMigrator` rx channel from backing up with big `BeaconState` messages.
Looking at some logs from my Medalla node, we can see a discrepancy between the head finalized epoch and the migrator finalized epoch:
```
Nov 17 16:50:21.606 DEBG Head beacon block slot: 129214, root: 0xbc7a…0b99, finalized_epoch: 4033, finalized_root: 0xf930…6562, justified_epoch: 4035, justified_root: 0x206b…9321, service: beacon
Nov 17 16:50:21.626 DEBG Batch processed service: sync, processed_blocks: 43, last_block_slot: 129214, chain: 8274002112260436595, first_block_slot: 129153, batch_epoch: 4036
Nov 17 16:50:21.626 DEBG Chain advanced processing_target: 4036, new_start: 4036, previous_start: 4034, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Completed batch received awaiting_batches: 5, blocks: 47, epoch: 4048, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Requesting batch start_slot: 129601, end_slot: 129664, downloaded: 0, processed: 0, state: Downloading(16Uiu2HAmG3C3t1McaseReECjAF694tjVVjkDoneZEbxNhWm1nZaT, 0 blocks, 1273), epoch: 4050, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.654 DEBG Database compaction complete service: beacon
Nov 17 16:50:22.655 INFO Starting database pruning new_finalized_epoch: 2193, old_finalized_epoch: 2192, service: beacon
```
I believe this indicates that the migrator rx has a backed-up queue of `MigrationNotification` items which each contain a `BeaconState`.
## TODO
- [x] Remove finalized state requirement for op-pool
2020-11-17 23:11:26 +00:00
|
|
|
let previous_fork_is_finalized =
|
2021-07-09 06:15:32 +00:00
|
|
|
head_state.finalized_checkpoint().epoch >= head_state.fork().epoch;
|
Address queue congestion in migrator (#1923)
## Issue Addressed
*Should* address #1917
## Proposed Changes
Stops the `BackgroupMigrator` rx channel from backing up with big `BeaconState` messages.
Looking at some logs from my Medalla node, we can see a discrepancy between the head finalized epoch and the migrator finalized epoch:
```
Nov 17 16:50:21.606 DEBG Head beacon block slot: 129214, root: 0xbc7a…0b99, finalized_epoch: 4033, finalized_root: 0xf930…6562, justified_epoch: 4035, justified_root: 0x206b…9321, service: beacon
Nov 17 16:50:21.626 DEBG Batch processed service: sync, processed_blocks: 43, last_block_slot: 129214, chain: 8274002112260436595, first_block_slot: 129153, batch_epoch: 4036
Nov 17 16:50:21.626 DEBG Chain advanced processing_target: 4036, new_start: 4036, previous_start: 4034, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Completed batch received awaiting_batches: 5, blocks: 47, epoch: 4048, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Requesting batch start_slot: 129601, end_slot: 129664, downloaded: 0, processed: 0, state: Downloading(16Uiu2HAmG3C3t1McaseReECjAF694tjVVjkDoneZEbxNhWm1nZaT, 0 blocks, 1273), epoch: 4050, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.654 DEBG Database compaction complete service: beacon
Nov 17 16:50:22.655 INFO Starting database pruning new_finalized_epoch: 2193, old_finalized_epoch: 2192, service: beacon
```
I believe this indicates that the migrator rx has a backed-up queue of `MigrationNotification` items which each contain a `BeaconState`.
## TODO
- [x] Remove finalized state requirement for op-pool
2020-11-17 23:11:26 +00:00
|
|
|
// Prune any slashings which don't match the current fork version, or the previous
|
|
|
|
// fork version if it is not finalized yet.
|
2021-07-09 06:15:32 +00:00
|
|
|
let fork_ok = (*fork_version == head_state.fork().current_version)
|
|
|
|
|| (*fork_version == head_state.fork().previous_version
|
Address queue congestion in migrator (#1923)
## Issue Addressed
*Should* address #1917
## Proposed Changes
Stops the `BackgroupMigrator` rx channel from backing up with big `BeaconState` messages.
Looking at some logs from my Medalla node, we can see a discrepancy between the head finalized epoch and the migrator finalized epoch:
```
Nov 17 16:50:21.606 DEBG Head beacon block slot: 129214, root: 0xbc7a…0b99, finalized_epoch: 4033, finalized_root: 0xf930…6562, justified_epoch: 4035, justified_root: 0x206b…9321, service: beacon
Nov 17 16:50:21.626 DEBG Batch processed service: sync, processed_blocks: 43, last_block_slot: 129214, chain: 8274002112260436595, first_block_slot: 129153, batch_epoch: 4036
Nov 17 16:50:21.626 DEBG Chain advanced processing_target: 4036, new_start: 4036, previous_start: 4034, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Completed batch received awaiting_batches: 5, blocks: 47, epoch: 4048, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.162 DEBG Requesting batch start_slot: 129601, end_slot: 129664, downloaded: 0, processed: 0, state: Downloading(16Uiu2HAmG3C3t1McaseReECjAF694tjVVjkDoneZEbxNhWm1nZaT, 0 blocks, 1273), epoch: 4050, chain: 8274002112260436595, service: sync
Nov 17 16:50:22.654 DEBG Database compaction complete service: beacon
Nov 17 16:50:22.655 INFO Starting database pruning new_finalized_epoch: 2193, old_finalized_epoch: 2192, service: beacon
```
I believe this indicates that the migrator rx has a backed-up queue of `MigrationNotification` items which each contain a `BeaconState`.
## TODO
- [x] Remove finalized state requirement for op-pool
2020-11-17 23:11:26 +00:00
|
|
|
                    && !previous_fork_is_finalized);
            // Slashings that don't slash any validators can also be dropped.
            let slashing_ok =
                get_slashable_indices_modular(head_state, slashing, |_, validator| {
                    // Declare that a validator is still slashable if they have not exited prior
                    // to the finalized epoch.
                    //
                    // We cannot check the `slashed` field since the `head` is not finalized and
                    // a fork could un-slash someone.
                    validator.exit_epoch > head_state.finalized_checkpoint().epoch
                })
                .map_or(false, |indices| !indices.is_empty());

            fork_ok && slashing_ok
        });
    }

    /// Total number of attester slashings in the pool.
    pub fn num_attester_slashings(&self) -> usize {
        self.attester_slashings.read().len()
    }

    /// Total number of proposer slashings in the pool.
    pub fn num_proposer_slashings(&self) -> usize {
        self.proposer_slashings.read().len()
    }

    /// Insert a voluntary exit that has previously been checked elsewhere.
    pub fn insert_voluntary_exit(&self, verified_exit: SigVerifiedOp<SignedVoluntaryExit>) {
        let exit = verified_exit.into_inner();
        self.voluntary_exits
            .write()
            .insert(exit.message.validator_index, exit);
    }
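
    // Illustrative sketch (not part of the original code): the expected call pattern is to
    // verify an exit first and only then hand it to the pool. Assuming an `op_pool`, a recent
    // `state`, the `spec` and a `signed_exit: SignedVoluntaryExit` are in scope, and using the
    // `VerifyOperation` trait that the tests below use for slashings, the flow would look
    // roughly like:
    //
    //     let verified_exit = signed_exit.validate(&state, &spec)?;
    //     op_pool.insert_voluntary_exit(verified_exit);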

    /// Get a list of voluntary exits for inclusion in a block.
    pub fn get_voluntary_exits(
        &self,
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Vec<SignedVoluntaryExit> {
        filter_limit_operations(
            self.voluntary_exits.read().values(),
            |exit| verify_exit(state, exit, VerifySignatures::False, spec).is_ok(),
            T::MaxVoluntaryExits::to_usize(),
        )
    }
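
    // Illustrative sketch (the bindings are assumptions, only the method itself is real): a
    // block producer holding an `op_pool` and the state it is building on could fill the
    // exits field of a block body like this:
    //
    //     let exits = op_pool.get_voluntary_exits(&state, spec);
    //     debug_assert!(exits.len() <= T::MaxVoluntaryExits::to_usize());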

    /// Prune voluntary exits for validators that have already exited at or before the
    /// finalized checkpoint of the head.
    pub fn prune_voluntary_exits(&self, head_state: &BeaconState<T>) {
        prune_validator_hash_map(
            &mut self.voluntary_exits.write(),
            // This condition is slightly too loose, since there will be some finalized exits that
            // are missed here.
            //
            // We choose simplicity over the gain of pruning more exits since they are small and
            // should not be seen frequently.
            |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
            head_state,
        );
    }

    /// Prune all types of operations given the latest head state and the current epoch.
    pub fn prune_all(&self, head_state: &BeaconState<T>, current_epoch: Epoch) {
        self.prune_attestations(current_epoch);
        self.prune_sync_contributions(head_state.slot());
        self.prune_proposer_slashings(head_state);
        self.prune_attester_slashings(head_state);
        self.prune_voluntary_exits(head_state);
    }
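
    // Illustrative sketch (caller names are assumptions): this is intended to be invoked
    // whenever the head advances, e.g. from the beacon chain's head-update path, passing
    // whichever epoch the caller considers current:
    //
    //     op_pool.prune_all(&head_state, current_epoch);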

    /// Total number of voluntary exits in the pool.
    pub fn num_voluntary_exits(&self) -> usize {
        self.voluntary_exits.read().len()
    }

    /// Returns all known `Attestation` objects.
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_all_attestations(&self) -> Vec<Attestation<T>> {
        self.attestations
            .read()
            .iter()
            .map(|(_, attns)| attns.iter().cloned())
            .flatten()
            .collect()
    }

    /// Returns all known `Attestation` objects that pass the provided filter.
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_filtered_attestations<F>(&self, filter: F) -> Vec<Attestation<T>>
    where
        F: Fn(&Attestation<T>) -> bool,
    {
        self.attestations
            .read()
            .iter()
            .map(|(_, attns)| attns.iter().cloned())
            .flatten()
            .filter(filter)
            .collect()
    }
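
    // Illustrative sketch: the filter is an arbitrary predicate over the attestation, e.g.
    // restricting the dump to a single slot (`target_slot` is an assumed local binding):
    //
    //     let at_slot = op_pool.get_filtered_attestations(|att| att.data.slot == target_slot);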

    /// Returns all known `AttesterSlashing` objects.
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> {
        self.attester_slashings
            .read()
            .iter()
            .map(|(slashing, _)| slashing.clone())
            .collect()
    }

    /// Returns all known `ProposerSlashing` objects.
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_all_proposer_slashings(&self) -> Vec<ProposerSlashing> {
        self.proposer_slashings
            .read()
            .iter()
            .map(|(_, slashing)| slashing.clone())
            .collect()
    }

    /// Returns all known `SignedVoluntaryExit` objects.
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_all_voluntary_exits(&self) -> Vec<SignedVoluntaryExit> {
        self.voluntary_exits
            .read()
            .iter()
            .map(|(_, exit)| exit.clone())
            .collect()
    }
}
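
// Illustrative sketch (not from the original file): since the `get_all_*` getters may return
// operations that are invalid for block inclusion, they are best suited to observability,
// e.g. a hypothetical debug dump rather than block production:
//
//     println!(
//         "pool: {} exits, {} proposer slashings, {} attester slashings",
//         op_pool.get_all_voluntary_exits().len(),
//         op_pool.get_all_proposer_slashings().len(),
//         op_pool.get_all_attester_slashings().len(),
//     );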

/// Filter up to a maximum number of operations out of an iterator.
fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: usize) -> Vec<T>
where
    I: IntoIterator<Item = &'a T>,
    F: Fn(&T) -> bool,
    T: Clone,
{
    operations
        .into_iter()
        .filter(|x| filter(*x))
        .take(limit)
        .cloned()
        .collect()
}
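
// Illustrative sketch: the helper is generic over any iterator of references, so it can be
// exercised on plain data (the values below are assumptions, not crate types):
//
//     let evens = filter_limit_operations(&[1u64, 2, 3, 4, 5, 6], |x| x % 2 == 0, 2);
//     assert_eq!(evens, vec![2, 4]);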

/// Remove all entries from the given hash map for which `prune_if` returns true.
///
/// The keys in the map should be validator indices, which will be looked up
/// in the state's validator registry and then passed to `prune_if`.
/// Entries for unknown validators will be kept.
fn prune_validator_hash_map<T, F, E: EthSpec>(
    map: &mut HashMap<u64, T>,
    prune_if: F,
    head_state: &BeaconState<E>,
) where
    F: Fn(&Validator) -> bool,
{
    map.retain(|&validator_index, _| {
        head_state
            .validators()
            .get(validator_index as usize)
            .map_or(true, |validator| !prune_if(validator))
    });
}
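
// Illustrative sketch: this is the same pattern `prune_voluntary_exits` uses above, shown in
// isolation (the `exits` map and `head_state` bindings are assumptions):
//
//     prune_validator_hash_map(
//         &mut exits,
//         |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
//         head_state,
//     );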

/// Compare two operation pools.
impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
    fn eq(&self, other: &Self) -> bool {
        if ptr::eq(self, other) {
            return true;
        }
        *self.attestations.read() == *other.attestations.read()
            && *self.attester_slashings.read() == *other.attester_slashings.read()
            && *self.proposer_slashings.read() == *other.proposer_slashings.read()
            && *self.voluntary_exits.read() == *other.voluntary_exits.read()
    }
}

#[cfg(all(test, not(debug_assertions)))]
mod release_tests {
    use super::attestation::earliest_attestation_validators;
    use super::*;
    use beacon_chain::test_utils::{
        BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee,
    };
    use lazy_static::lazy_static;
    use state_processing::{
        common::{base::get_base_reward, get_attesting_indices},
        VerifyOperation,
    };
    use std::collections::BTreeSet;
    use std::iter::FromIterator;
    use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
    use types::*;

    pub const MAX_VALIDATOR_COUNT: usize = 4 * 32 * 128;

    lazy_static! {
        /// A cached set of keys.
        static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(MAX_VALIDATOR_COUNT);
    }

    fn get_harness<E: EthSpec>(
        validator_count: usize,
        spec: Option<ChainSpec>,
    ) -> BeaconChainHarness<EphemeralHarnessType<E>> {
        let harness =
            BeaconChainHarness::new(E::default(), spec, KEYPAIRS[0..validator_count].to_vec());

        harness.advance_slot();

        harness
    }

    /// Test state for attestation-related tests.
    fn attestation_test_state<E: EthSpec>(
        num_committees: usize,
    ) -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec) {
        let spec = E::default_spec();

        let num_validators =
            num_committees * E::slots_per_epoch() as usize * spec.target_committee_size;
        let harness = get_harness::<E>(num_validators, None);

        (harness, spec)
    }

    /// Test state for sync contribution-related tests.
    fn sync_contribution_test_state<E: EthSpec>(
        num_committees: usize,
    ) -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec) {
        let mut spec = E::default_spec();

        spec.altair_fork_epoch = Some(Epoch::new(0));

        let num_validators =
            num_committees * E::slots_per_epoch() as usize * spec.target_committee_size;
        let harness = get_harness::<E>(num_validators, Some(spec.clone()));

        let state = harness.get_current_state();
        harness.add_attested_blocks_at_slots(
            state,
            Hash256::zero(),
            &[Slot::new(1)],
            (0..num_validators).collect::<Vec<_>>().as_slice(),
        );

        (harness, spec)
    }

    #[test]
    fn test_earliest_attestation() {
        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);
        let mut state = harness.get_current_state();
        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        let num_validators =
            MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size;

        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        for (atts, aggregate) in &attestations {
            let att2 = aggregate.as_ref().unwrap().message.aggregate.clone();

            let att1 = atts
                .into_iter()
                .map(|(att, _)| att)
                .take(2)
                .fold::<Option<Attestation<MainnetEthSpec>>, _>(None, |att, new_att| {
                    if let Some(mut a) = att {
                        a.aggregate(&new_att);
                        Some(a)
                    } else {
                        Some(new_att.clone())
                    }
                })
                .unwrap();

            assert_eq!(
                att1.aggregation_bits.num_set_bits(),
                earliest_attestation_validators(&att1, &state, state.as_base().unwrap())
                    .num_set_bits()
            );

            // FIXME(altair): handle altair in these tests
            state
                .as_base_mut()
                .unwrap()
                .current_epoch_attestations
                .push(PendingAttestation {
                    aggregation_bits: att1.aggregation_bits.clone(),
                    data: att1.data.clone(),
                    inclusion_delay: 0,
                    proposer_index: 0,
                })
                .unwrap();

            assert_eq!(
                committees.get(0).unwrap().committee.len() - 2,
                earliest_attestation_validators(&att2, &state, state.as_base().unwrap())
                    .num_set_bits()
            );
        }
    }

    /// End-to-end test of basic attestation handling.
    #[test]
    fn attestation_aggregation_insert_get_prune() {
        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);

        let op_pool = OperationPool::<MainnetEthSpec>::new();
        let mut state = harness.get_current_state();

        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        assert_eq!(
            committees.len(),
            1,
            "we expect just one committee with this many validators"
        );

        let num_validators =
            MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size;

        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        for (atts, _) in attestations {
            for att in atts.into_iter() {
                op_pool
                    .insert_attestation(att.0, &state.fork(), state.genesis_validators_root(), spec)
                    .unwrap();
            }
        }

        assert_eq!(op_pool.attestations.read().len(), committees.len());
        assert_eq!(op_pool.num_attestations(), committees.len());

        // Before the min attestation inclusion delay, get_attestations shouldn't return anything.
        assert_eq!(
            op_pool
                .get_attestations(&state, |_| true, |_| true, spec)
                .expect("should have attestations")
                .len(),
            0
        );

        // Then once the delay has elapsed, we should get a single aggregated attestation.
        *state.slot_mut() += spec.min_attestation_inclusion_delay;

        let block_attestations = op_pool
            .get_attestations(&state, |_| true, |_| true, spec)
            .expect("Should have block attestations");
        assert_eq!(block_attestations.len(), committees.len());

        let agg_att = &block_attestations[0];
        assert_eq!(
            agg_att.aggregation_bits.num_set_bits(),
            spec.target_committee_size as usize
        );

        // Prune attestations shouldn't do anything at this point.
        op_pool.prune_attestations(state.current_epoch());
        assert_eq!(op_pool.num_attestations(), committees.len());

        // But once we advance to more than an epoch after the attestation, it should prune it
        // out of existence.
        *state.slot_mut() += 2 * MainnetEthSpec::slots_per_epoch();
        op_pool.prune_attestations(state.current_epoch());
        assert_eq!(op_pool.num_attestations(), 0);
    }

    /// Adding an attestation already in the pool should not increase the size of the pool.
    #[test]
    fn attestation_duplicate() {
        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);

        let state = harness.get_current_state();

        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        let num_validators =
            MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size;
        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        for (_, aggregate) in attestations {
            let att = aggregate.unwrap().message.aggregate;
            op_pool
                .insert_attestation(
                    att.clone(),
                    &state.fork(),
                    state.genesis_validators_root(),
                    spec,
                )
                .unwrap();
            op_pool
                .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec)
                .unwrap();
        }

        assert_eq!(op_pool.num_attestations(), committees.len());
    }

    /// Adding lots of attestations that only intersect pairwise should lead to two aggregate
    /// attestations.
    #[test]
    fn attestation_pairwise_overlapping() {
        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(1);

        let state = harness.get_current_state();

        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        let num_validators =
            MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size;

        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        let step_size = 2;
        // Create attestations that overlap on `step_size` validators, like:
        // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ...
        for (atts1, _) in attestations {
            let atts2 = atts1.clone();
            let aggs1 = atts1
                .chunks_exact(step_size * 2)
                .map(|chunk| {
                    let agg = chunk.into_iter().map(|(att, _)| att).fold::<Option<
                        Attestation<MainnetEthSpec>,
                    >, _>(
                        None,
                        |att, new_att| {
                            if let Some(mut a) = att {
                                a.aggregate(new_att);
                                Some(a)
                            } else {
                                Some(new_att.clone())
                            }
                        },
                    );
                    agg.unwrap()
                })
                .collect::<Vec<_>>();
            let aggs2 = atts2
                .into_iter()
                .skip(step_size)
                .collect::<Vec<_>>()
                .as_slice()
                .chunks_exact(step_size * 2)
                .map(|chunk| {
                    let agg = chunk.into_iter().map(|(att, _)| att).fold::<Option<
                        Attestation<MainnetEthSpec>,
                    >, _>(
                        None,
                        |att, new_att| {
                            if let Some(mut a) = att {
                                a.aggregate(new_att);
                                Some(a)
                            } else {
                                Some(new_att.clone())
                            }
                        },
                    );
                    agg.unwrap()
                })
                .collect::<Vec<_>>();

            for att in aggs1.into_iter().chain(aggs2.into_iter()) {
                op_pool
                    .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec)
                    .unwrap();
            }
        }

        // The attestations should get aggregated into two attestations that comprise all
        // validators.
        assert_eq!(op_pool.attestations.read().len(), committees.len());
        assert_eq!(op_pool.num_attestations(), 2 * committees.len());
    }

    /// Create a bunch of attestations signed by a small number of validators, and another
    /// bunch signed by a larger number, such that there are at least `max_attestations`
    /// signed by the larger number. Then, check that `get_attestations` only returns the
    /// high-quality attestations. To ensure that no aggregation occurs, ALL attestations
    /// are also signed by the 0th member of the committee.
    #[test]
    fn attestation_get_max() {
        let small_step_size = 2;
        let big_step_size = 4;
        let num_committees = big_step_size;

        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(num_committees);

        let mut state = harness.get_current_state();

        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        let max_attestations = <MainnetEthSpec as EthSpec>::MaxAttestations::to_usize();
        let target_committee_size = spec.target_committee_size as usize;
        let num_validators = num_committees
            * MainnetEthSpec::slots_per_epoch() as usize
            * spec.target_committee_size;

        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        let insert_attestations = |attestations: Vec<(Attestation<MainnetEthSpec>, SubnetId)>,
                                   step_size| {
            let att_0 = attestations.get(0).unwrap().0.clone();
            let aggs = attestations
                .chunks_exact(step_size)
                .map(|chunk| {
                    chunk
                        .into_iter()
                        .map(|(att, _)| att)
                        .fold::<Attestation<MainnetEthSpec>, _>(
                            att_0.clone(),
                            |mut att, new_att| {
                                att.aggregate(new_att);
                                att
                            },
                        )
                })
                .collect::<Vec<_>>();

            for att in aggs {
                op_pool
                    .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec)
                    .unwrap();
            }
        };

        for (atts, _) in attestations {
            assert_eq!(atts.len(), target_committee_size);
            // Attestations signed by only 2-3 validators
            insert_attestations(atts.clone(), small_step_size);
            // Attestations signed by 4+ validators
            insert_attestations(atts, big_step_size);
        }

        let num_small = target_committee_size / small_step_size;
        let num_big = target_committee_size / big_step_size;

        assert_eq!(op_pool.attestations.read().len(), committees.len());
        assert_eq!(
            op_pool.num_attestations(),
            (num_small + num_big) * committees.len()
        );
        assert!(op_pool.num_attestations() > max_attestations);

        *state.slot_mut() += spec.min_attestation_inclusion_delay;
        let best_attestations = op_pool
            .get_attestations(&state, |_| true, |_| true, spec)
            .expect("should have best attestations");
        assert_eq!(best_attestations.len(), max_attestations);

        // All the best attestations should be signed by at least `big_step_size` (4) validators.
        for att in &best_attestations {
            assert!(att.aggregation_bits.num_set_bits() >= big_step_size);
        }
    }

    #[test]
    fn attestation_rewards() {
        let small_step_size = 2;
        let big_step_size = 4;
        let num_committees = big_step_size;

        let (harness, ref spec) = attestation_test_state::<MainnetEthSpec>(num_committees);

        let mut state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slot = state.slot();
        let committees = state
            .get_beacon_committees_at_slot(slot)
            .unwrap()
            .into_iter()
            .map(BeaconCommittee::into_owned)
            .collect::<Vec<_>>();

        let max_attestations = <MainnetEthSpec as EthSpec>::MaxAttestations::to_usize();
        let target_committee_size = spec.target_committee_size as usize;

        // Each validator will have a multiple of 1_000_000_000 wei.
        // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000).
        for i in 0..state.validators().len() {
            state.validators_mut()[i].effective_balance = 1_000_000_000 * i as u64;
        }

        let num_validators = num_committees
            * MainnetEthSpec::slots_per_epoch() as usize
            * spec.target_committee_size;
        let attestations = harness.make_attestations(
            (0..num_validators).collect::<Vec<_>>().as_slice(),
            &state,
            Hash256::zero(),
            SignedBeaconBlockHash::from(Hash256::zero()),
            slot,
        );

        let insert_attestations = |attestations: Vec<(Attestation<MainnetEthSpec>, SubnetId)>,
                                   step_size| {
            let att_0 = attestations.get(0).unwrap().0.clone();
            let aggs = attestations
                .chunks_exact(step_size)
                .map(|chunk| {
                    chunk
                        .into_iter()
                        .map(|(att, _)| att)
                        .fold::<Attestation<MainnetEthSpec>, _>(
                            att_0.clone(),
                            |mut att, new_att| {
                                att.aggregate(new_att);
                                att
                            },
                        )
                })
                .collect::<Vec<_>>();

            for att in aggs {
                op_pool
                    .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec)
                    .unwrap();
            }
        };

        for (atts, _) in attestations {
            assert_eq!(atts.len(), target_committee_size);
            // Attestations signed by only 2-3 validators
            insert_attestations(atts.clone(), small_step_size);
            // Attestations signed by 4+ validators
            insert_attestations(atts, big_step_size);
        }

        let num_small = target_committee_size / small_step_size;
        let num_big = target_committee_size / big_step_size;

        assert_eq!(op_pool.attestations.read().len(), committees.len());
        assert_eq!(
            op_pool.num_attestations(),
            (num_small + num_big) * committees.len()
        );
        assert!(op_pool.num_attestations() > max_attestations);

        *state.slot_mut() += spec.min_attestation_inclusion_delay;
        let best_attestations = op_pool
            .get_attestations(&state, |_| true, |_| true, spec)
            .expect("should have valid best attestations");
        assert_eq!(best_attestations.len(), max_attestations);

        let total_active_balance = state.get_total_active_balance().unwrap();

        // Set of indices covered by previous attestations in `best_attestations`.
        let mut seen_indices = BTreeSet::new();
        // Used for asserting that rewards are in decreasing order.
        let mut prev_reward = u64::max_value();

        for att in &best_attestations {
            let fresh_validators_bitlist =
                earliest_attestation_validators(att, &state, state.as_base().unwrap());
            let committee = state
                .get_beacon_committee(att.data.slot, att.data.index)
                .expect("should get beacon committee");

            let att_indices = BTreeSet::from_iter(
                get_attesting_indices::<MainnetEthSpec>(
                    committee.committee,
                    &fresh_validators_bitlist,
                )
                .unwrap(),
            );
            let fresh_indices = &att_indices - &seen_indices;

            let rewards = fresh_indices
                .iter()
                .map(|validator_index| {
                    get_base_reward(
                        &state,
                        *validator_index as usize,
                        total_active_balance,
                        spec,
                    )
                    .unwrap()
                        / spec.proposer_reward_quotient
                })
                .sum();

            // Check that rewards are in decreasing order
            assert!(prev_reward >= rewards);

            prev_reward = rewards;
            seen_indices.extend(fresh_indices);
        }
    }

    /// Insert two slashings for the same proposer and ensure only one is returned.
    #[test]
    fn duplicate_proposer_slashing() {
        let harness = get_harness(32, None);
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let proposer_index = 0;
        let slashing1 = harness.make_proposer_slashing(proposer_index);

        let slashing2 = ProposerSlashing {
            signed_header_1: slashing1.signed_header_2.clone(),
            signed_header_2: slashing1.signed_header_1.clone(),
        };
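
        // The pool presumably keys proposer slashings by proposer index, so inserting a second
        // slashing for the same proposer should replace the first rather than add a duplicate.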

        // Both slashings should be valid and accepted by the pool.
        op_pool
            .insert_proposer_slashing(slashing1.clone().validate(&state, &harness.spec).unwrap());
        op_pool
            .insert_proposer_slashing(slashing2.clone().validate(&state, &harness.spec).unwrap());

        // Should only get the second slashing back.
        assert_eq!(op_pool.get_slashings(&state).0, vec![slashing2]);
    }

    // Sanity check on the pruning of proposer slashings
    #[test]
    fn prune_proposer_slashing_noop() {
        let harness = get_harness(32, None);
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slashing = harness.make_proposer_slashing(0);
        op_pool.insert_proposer_slashing(slashing.clone().validate(&state, &harness.spec).unwrap());
        op_pool.prune_proposer_slashings(&state);
        assert_eq!(op_pool.get_slashings(&state).0, vec![slashing]);
    }

    // Sanity check on the pruning of attester slashings
    #[test]
    fn prune_attester_slashing_noop() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slashing = harness.make_attester_slashing(vec![1, 3, 5, 7, 9]);
        op_pool.insert_attester_slashing(
            slashing.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.prune_attester_slashings(&state);
        assert_eq!(op_pool.get_slashings(&state).1, vec![slashing]);
    }

    // Check that we get maximum coverage for attester slashings (the highest number of validators
    // slashed).
    #[test]
    fn simple_max_cover_attester_slashing() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slashing_1 = harness.make_attester_slashing(vec![1]);
        let slashing_2 = harness.make_attester_slashing(vec![2, 3]);
        let slashing_3 = harness.make_attester_slashing(vec![4, 5, 6]);
        let slashing_4 = harness.make_attester_slashing(vec![7, 8, 9, 10]);

        op_pool.insert_attester_slashing(
            slashing_1.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_2.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_3.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_4.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
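
        // Only two attester slashings fit in a block (assuming mainnet `MAX_ATTESTER_SLASHINGS`),
        // so the pool should pick the two that slash the most validators: slashing_4 (4) and
        // slashing_3 (3).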
        let best_slashings = op_pool.get_slashings(&state);
        assert_eq!(best_slashings.1, vec![slashing_4, slashing_3]);
    }

    // Check that we get maximum coverage for attester slashings with overlapping indices
    #[test]
    fn overlapping_max_cover_attester_slashing() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3, 4]);
        let slashing_2 = harness.make_attester_slashing(vec![1, 2, 5]);
        let slashing_3 = harness.make_attester_slashing(vec![5, 6]);
        let slashing_4 = harness.make_attester_slashing(vec![6]);

        op_pool.insert_attester_slashing(
            slashing_1.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_2.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_3.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_4.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
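
        // Assuming greedy max-cover: slashing_1 covers {1, 2, 3, 4} and is taken first;
        // slashing_3 then adds {5, 6}, beating slashing_2 (which only adds {5}) and slashing_4
        // (which only adds {6}).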
        let best_slashings = op_pool.get_slashings(&state);
        assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]);
    }

    // Max coverage of attester slashings taking into account proposer slashings
    #[test]
    fn max_coverage_attester_proposer_slashings() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let p_slashing = harness.make_proposer_slashing(1);
        let a_slashing_1 = harness.make_attester_slashing(vec![1, 2, 3, 4]);
        let a_slashing_2 = harness.make_attester_slashing(vec![1, 3, 4]);
        let a_slashing_3 = harness.make_attester_slashing(vec![5, 6]);

        op_pool.insert_proposer_slashing(p_slashing.clone().validate(&state, spec).unwrap());
        op_pool.insert_attester_slashing(
            a_slashing_1.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            a_slashing_2.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            a_slashing_3.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
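
        // Validator 1 is already covered by the proposer slashing, which presumably removes its
        // weight from the attester-slashing cover. Either way, a_slashing_2's remaining indices
        // are a subset of a_slashing_1's, so the best pair is {a_slashing_1, a_slashing_3}.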
        let best_slashings = op_pool.get_slashings(&state);
        assert_eq!(best_slashings.1, vec![a_slashing_1, a_slashing_3]);
    }

    // Max coverage: check that non-overlapping indices are still recognized for their value
    #[test]
    fn max_coverage_different_indices_set() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();

        let slashing_1 = harness.make_attester_slashing_different_indices(
            vec![1, 2, 3, 4, 5, 6],
            vec![3, 4, 5, 6, 7, 8],
        );
        let slashing_2 = harness.make_attester_slashing(vec![5, 6]);
        let slashing_3 = harness.make_attester_slashing(vec![1, 2, 3]);

        op_pool.insert_attester_slashing(
            slashing_1.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_2.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_3.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
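
        // Only validators attesting in *both* of a slashing's attestations are slashable, so
        // slashing_1 is assumed to effectively cover the intersection {3, 4, 5, 6}. Adding
        // slashing_3 ({1, 2, 3}) then covers more new validators than slashing_2 ({5, 6}),
        // which is already fully covered.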
        let best_slashings = op_pool.get_slashings(&state);
        assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]);
    }

    // Max coverage should be affected by the overall effective balances
    #[test]
    fn max_coverage_effective_balances() {
        let harness = get_harness(32, None);
        let spec = &harness.spec;
        let mut state = harness.get_current_state();
        let op_pool = OperationPool::<MainnetEthSpec>::new();
        state.validators_mut()[1].effective_balance = 17_000_000_000;
        state.validators_mut()[2].effective_balance = 17_000_000_000;
        state.validators_mut()[3].effective_balance = 17_000_000_000;

        let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3]);
        let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]);
        let slashing_3 = harness.make_attester_slashing(vec![7, 8]);

        op_pool.insert_attester_slashing(
            slashing_1.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_2.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
        op_pool.insert_attester_slashing(
            slashing_3.clone().validate(&state, spec).unwrap(),
            state.fork(),
        );
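
        // Coverage is presumably weighted by effective balance: slashing_1 now covers roughly
        // 17 ETH * 3, which is outweighed by slashing_2 (32 ETH * 3) and slashing_3 (32 ETH * 2).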
        let best_slashings = op_pool.get_slashings(&state);
        assert_eq!(best_slashings.1, vec![slashing_2, slashing_3]);
    }

    /// End-to-end test of basic sync contribution handling.
    #[test]
    fn sync_contribution_aggregation_insert_get_prune() {
        let (harness, _) = sync_contribution_test_state::<MainnetEthSpec>(1);

        let op_pool = OperationPool::<MainnetEthSpec>::new();
        let state = harness.get_current_state();

        let block_root = *state
            .get_block_root(state.slot() - Slot::new(1))
            .ok()
            .expect("block root should exist at slot");
        let contributions = harness.make_sync_contributions(
            &state,
            block_root,
            state.slot() - Slot::new(1),
            RelativeSyncCommittee::Current,
        );

        for (_, contribution_and_proof) in contributions {
            let contribution = contribution_and_proof
                .expect("contribution exists for committee")
                .message
                .contribution;
            op_pool.insert_sync_contribution(contribution).unwrap();
        }
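
        // All contributions target the same slot and block root, so the pool is assumed to store
        // them under a single `SyncAggregateId` key, with one contribution per sync subnet.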
        assert_eq!(op_pool.sync_contributions.read().len(), 1);
        assert_eq!(
            op_pool.num_sync_contributions(),
            SYNC_COMMITTEE_SUBNET_COUNT as usize
        );

        let sync_aggregate = op_pool
            .get_sync_aggregate(&state)
            .expect("Should calculate the sync aggregate")
            .expect("Should have block sync aggregate");
        assert_eq!(
            sync_aggregate.sync_committee_bits.num_set_bits(),
            MainnetEthSpec::sync_committee_size()
        );

        // Pruning sync contributions shouldn't do anything at this point.
        op_pool.prune_sync_contributions(state.slot() - Slot::new(1));
        assert_eq!(
            op_pool.num_sync_contributions(),
            SYNC_COMMITTEE_SUBNET_COUNT as usize
        );
        op_pool.prune_sync_contributions(state.slot());
        assert_eq!(
            op_pool.num_sync_contributions(),
            SYNC_COMMITTEE_SUBNET_COUNT as usize
        );

        // But once we advance to more than one slot after the contribution, it should prune it
        // out of existence.
        op_pool.prune_sync_contributions(state.slot() + Slot::new(1));
        assert_eq!(op_pool.num_sync_contributions(), 0);
    }

    /// Adding a sync contribution already in the pool should not increase the size of the pool.
    #[test]
    fn sync_contribution_duplicate() {
        let (harness, _) = sync_contribution_test_state::<MainnetEthSpec>(1);

        let op_pool = OperationPool::<MainnetEthSpec>::new();
        let state = harness.get_current_state();
        let block_root = *state
            .get_block_root(state.slot() - Slot::new(1))
            .ok()
            .expect("block root should exist at slot");
        let contributions = harness.make_sync_contributions(
            &state,
            block_root,
            state.slot() - Slot::new(1),
            RelativeSyncCommittee::Current,
        );

        for (_, contribution_and_proof) in contributions {
            let contribution = contribution_and_proof
                .expect("contribution exists for committee")
                .message
                .contribution;
            op_pool
                .insert_sync_contribution(contribution.clone())
                .unwrap();
            op_pool.insert_sync_contribution(contribution).unwrap();
        }

        assert_eq!(op_pool.sync_contributions.read().len(), 1);
        assert_eq!(
            op_pool.num_sync_contributions(),
            SYNC_COMMITTEE_SUBNET_COUNT as usize
        );
    }

    /// Adding a sync contribution already in the pool with more bits set should increase the
    /// number of bits set in the aggregate.
    #[test]
    fn sync_contribution_with_more_bits() {
        let (harness, _) = sync_contribution_test_state::<MainnetEthSpec>(1);

        let op_pool = OperationPool::<MainnetEthSpec>::new();
        let state = harness.get_current_state();
        let block_root = *state
            .get_block_root(state.slot() - Slot::new(1))
            .ok()
            .expect("block root should exist at slot");
        let contributions = harness.make_sync_contributions(
            &state,
            block_root,
            state.slot() - Slot::new(1),
            RelativeSyncCommittee::Current,
        );
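
        // Each contribution covers a disjoint sync subcommittee, and two bits are unset per
        // contribution below, so the aggregate is expected to have
        // `sync_committee_size - 2 * contributions.len()` bits set.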
        let expected_bits = MainnetEthSpec::sync_committee_size() - (2 * contributions.len());
        let mut first_contribution = contributions[0]
            .1
            .as_ref()
            .unwrap()
            .message
            .contribution
            .clone();

        // Add all contributions, but unset the first two bits of each.
        for (_, contribution_and_proof) in contributions {
            let mut contribution_fewer_bits = contribution_and_proof
                .expect("contribution exists for committee")
                .message
                .contribution;

            // Unset the first two bits of each contribution.
            contribution_fewer_bits
                .aggregation_bits
                .set(0, false)
                .expect("set bit");
            contribution_fewer_bits
                .aggregation_bits
                .set(1, false)
                .expect("set bit");

            op_pool
                .insert_sync_contribution(contribution_fewer_bits)
                .unwrap();
        }

        let sync_aggregate = op_pool
            .get_sync_aggregate(&state)
            .expect("Should calculate the sync aggregate")
            .expect("Should have block sync aggregate");
        assert_eq!(
            sync_aggregate.sync_committee_bits.num_set_bits(),
            expected_bits
        );

        // Unset only the first bit of the (fully-populated) first contribution and re-insert it.
        // Compared with the copy already in the pool (which has bits 0 and 1 unset), it has one
        // more bit set, so the sync aggregate should gain one bit.
        first_contribution
            .aggregation_bits
            .set(0, false)
            .expect("set bit");
        op_pool
            .insert_sync_contribution(first_contribution)
            .unwrap();

        // The sync aggregate should now include the additional set bit.
        let sync_aggregate = op_pool
            .get_sync_aggregate(&state)
            .expect("Should calculate the sync aggregate")
            .expect("Should have block sync aggregate");
        assert_eq!(
            sync_aggregate.sync_committee_bits.num_set_bits(),
            expected_bits + 1
        );
    }

    /// Adding a sync contribution already in the pool with fewer bits set should not increase the
    /// number of bits set in the aggregate.
    #[test]
    fn sync_contribution_with_fewer_bits() {
        let (harness, _) = sync_contribution_test_state::<MainnetEthSpec>(1);

        let op_pool = OperationPool::<MainnetEthSpec>::new();
        let state = harness.get_current_state();
        let block_root = *state
            .get_block_root(state.slot() - Slot::new(1))
            .ok()
            .expect("block root should exist at slot");
        let contributions = harness.make_sync_contributions(
            &state,
            block_root,
            state.slot() - Slot::new(1),
            RelativeSyncCommittee::Current,
        );

        let expected_bits = MainnetEthSpec::sync_committee_size() - (2 * contributions.len());
        let mut first_contribution = contributions[0]
            .1
            .as_ref()
            .unwrap()
            .message
            .contribution
            .clone();

        // Add all contributions, but unset the first two bits of each.
        for (_, contribution_and_proof) in contributions {
            let mut contribution_fewer_bits = contribution_and_proof
                .expect("contribution exists for committee")
                .message
                .contribution;

            // Unset the first two bits of each contribution.
            contribution_fewer_bits
                .aggregation_bits
                .set(0, false)
                .expect("set bit");
            contribution_fewer_bits
                .aggregation_bits
                .set(1, false)
                .expect("set bit");

            op_pool
                .insert_sync_contribution(contribution_fewer_bits)
                .unwrap();
        }

        let sync_aggregate = op_pool
            .get_sync_aggregate(&state)
            .expect("Should calculate the sync aggregate")
            .expect("Should have block sync aggregate");
        assert_eq!(
            sync_aggregate.sync_committee_bits.num_set_bits(),
            expected_bits
        );

        // Unset the first three bits of the first contribution and re-insert it. This should
        // not affect the number of bits set in the sync aggregate.
        first_contribution
            .aggregation_bits
            .set(0, false)
            .expect("set bit");
        first_contribution
            .aggregation_bits
            .set(1, false)
            .expect("set bit");
        first_contribution
            .aggregation_bits
            .set(2, false)
            .expect("set bit");
        op_pool
            .insert_sync_contribution(first_contribution)
            .unwrap();

        // The sync aggregate should still have the same number of set bits.
        let sync_aggregate = op_pool
            .get_sync_aggregate(&state)
            .expect("Should calculate the sync aggregate")
            .expect("Should have block sync aggregate");
        assert_eq!(
            sync_aggregate.sync_committee_bits.num_set_bits(),
            expected_bits
        );
    }
}