use crate::metrics;
use std::collections::HashMap;
use tree_hash::TreeHash;
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
use types::slot_data::SlotData;
use types::sync_committee_contribution::SyncContributionData;
use types::{Attestation, AttestationData, EthSpec, Hash256, Slot, SyncCommitteeContribution};

type AttestationDataRoot = Hash256;
type SyncDataRoot = Hash256;

/// The number of slots that will be stored in the pool.
///
/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all items
/// at slots less than `4` will be dropped and any future item with a slot less than `4`
/// will be refused.
const SLOTS_RETAINED: usize = 3;

/// The maximum number of distinct `AttestationData` that will be stored in each slot.
///
/// This is a DoS protection measure.
const MAX_ATTESTATIONS_PER_SLOT: usize = 16_384;

/// Returned upon successfully inserting an item into the pool.
#[derive(Debug, PartialEq)]
pub enum InsertOutcome {
    /// The item had not been seen before and was added to the pool.
    NewItemInserted { committee_index: usize },
    /// A validator signature for the given item's `Data` was already known. No changes were
    /// made.
    SignatureAlreadyKnown { committee_index: usize },
    /// The item's `Data` was known, but a signature for the given validator was not yet
    /// known. The signature was aggregated into the pool.
    SignatureAggregated { committee_index: usize },
}

#[derive(Debug, PartialEq)]
pub enum Error {
    /// The given `data.slot` was too low to be stored. No changes were made.
    SlotTooLow {
        slot: Slot,
        lowest_permissible_slot: Slot,
    },
    /// The given `aggregation_bits` field was empty.
    NoAggregationBitsSet,
    /// The given `aggregation_bits` field had more than one signature. The number of
    /// signatures found is included.
    MoreThanOneAggregationBitSet(usize),
    /// We have reached the maximum number of unique items that can be stored in a
    /// slot. This is a DoS protection measure.
    ReachedMaxItemsPerSlot(usize),
    /// The given `aggregation_bits` field had a different length from the one currently
    /// stored. This indicates a fairly serious error somewhere in the code that called this
    /// function.
    InconsistentBitfieldLengths,
    /// The given item was for the incorrect slot. This is an internal error.
    IncorrectSlot { expected: Slot, actual: Slot },
}

/// Implemented for items in the `NaiveAggregationPool`. Requires that items implement `SlotData`,
/// which means they have an associated slot. This handles aggregation of items that are inserted.
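///
/// For example (an illustrative sketch, not from the codebase), a caller that is generic over
/// the backing map only needs the trait methods:
///
/// ```ignore
/// fn insert_and_fetch<T: AggregateMap>(map: &mut T, value: &T::Value, data: &T::Data) {
///     // `insert` aggregates the single-signature `value` into any existing aggregate.
///     let _outcome = map.insert(value);
///     // `get` looks the aggregate up by hashing `data` into `T::Key`.
///     let _aggregate = map.get(data);
/// }
/// ```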
pub trait AggregateMap {
    /// `Key` should be a hash of `Data`.
    type Key;

    /// The item stored in the map.
    type Value: Clone + SlotData;

    /// The unique fields of `Value`, hashed to create `Key`.
    type Data: SlotData;

    /// Create a new `AggregateMap` with capacity `initial_capacity`.
    fn new(initial_capacity: usize) -> Self;

    /// Insert a `Value` into `Self`, returning a result.
    fn insert(&mut self, value: &Self::Value) -> Result<InsertOutcome, Error>;

    /// Get a `Value` from `Self` based on `Data`.
    fn get(&self, data: &Self::Data) -> Option<Self::Value>;

    /// Get a reference to the inner `HashMap`.
    fn get_map(&self) -> &HashMap<Self::Key, Self::Value>;

    /// Get a `Value` from `Self` based on `Key`, which is a hash of `Data`.
    fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value>;

    /// The number of items stored in `Self`.
    fn len(&self) -> usize;

    /// Start a timer observing inserts.
    fn start_insert_timer() -> Option<metrics::HistogramTimer>;

    /// Start a timer observing the time it takes to create a new map for a new slot.
    fn start_create_map_timer() -> Option<metrics::HistogramTimer>;

    /// Start a timer observing the time it takes to prune the pool.
    fn start_prune_timer() -> Option<metrics::HistogramTimer>;

    /// The default capacity of `Self`.
    fn default_capacity() -> usize;
}

/// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all
/// attestations are from the same slot.
pub struct AggregatedAttestationMap<E: EthSpec> {
    map: HashMap<AttestationDataRoot, Attestation<E>>,
}

impl<E: EthSpec> AggregateMap for AggregatedAttestationMap<E> {
    type Key = AttestationDataRoot;
    type Value = Attestation<E>;
    type Data = AttestationData;

    /// Create an empty collection with the given `initial_capacity`.
    fn new(initial_capacity: usize) -> Self {
        Self {
            map: HashMap::with_capacity(initial_capacity),
        }
    }

    /// Insert an attestation into `self`, aggregating it into the pool.
    ///
    /// The given attestation (`a`) must only have one signature.
    fn insert(&mut self, a: &Self::Value) -> Result<InsertOutcome, Error> {
        let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT);

        let set_bits = a
            .aggregation_bits
            .iter()
            .enumerate()
            .filter(|(_i, bit)| *bit)
            .map(|(i, _bit)| i)
            .collect::<Vec<_>>();

        let committee_index = set_bits
            .first()
            .copied()
            .ok_or(Error::NoAggregationBitsSet)?;

        if set_bits.len() > 1 {
            return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
        }

        let attestation_data_root = a.data.tree_hash_root();

        if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) {
            if existing_attestation
                .aggregation_bits
                .get(committee_index)
                .map_err(|_| Error::InconsistentBitfieldLengths)?
            {
                Ok(InsertOutcome::SignatureAlreadyKnown { committee_index })
            } else {
                let _timer =
                    metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION);
                existing_attestation.aggregate(a);
                Ok(InsertOutcome::SignatureAggregated { committee_index })
            }
        } else {
            if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT {
                return Err(Error::ReachedMaxItemsPerSlot(MAX_ATTESTATIONS_PER_SLOT));
            }

            self.map.insert(attestation_data_root, a.clone());
            Ok(InsertOutcome::NewItemInserted { committee_index })
        }
    }

    /// Returns an aggregated `Attestation` with the given `data`, if any.
    ///
    /// The given `data.slot` must match the slot that `self` was initialized with.
    fn get(&self, data: &Self::Data) -> Option<Self::Value> {
        self.map.get(&data.tree_hash_root()).cloned()
    }

    fn get_map(&self) -> &HashMap<Self::Key, Self::Value> {
        &self.map
    }

    /// Returns an aggregated `Attestation` with the given `root`, if any.
    fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value> {
        self.map.get(root)
    }

    fn len(&self) -> usize {
        self.map.len()
    }

    fn start_insert_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_INSERT)
    }

    fn start_create_map_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CREATE_MAP)
    }

    fn start_prune_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_PRUNE)
    }

    /// Use the `TARGET_COMMITTEE_SIZE`.
    ///
    /// Note: hard-coded until `TARGET_COMMITTEE_SIZE` is available via `EthSpec`.
    fn default_capacity() -> usize {
        128
    }
}

/// A collection of `SyncCommitteeContribution`, keyed by their `SyncContributionData`. Enforces
/// that all contributions are from the same slot.
pub struct SyncContributionAggregateMap<E: EthSpec> {
    map: HashMap<SyncDataRoot, SyncCommitteeContribution<E>>,
}

impl<E: EthSpec> AggregateMap for SyncContributionAggregateMap<E> {
    type Key = SyncDataRoot;
    type Value = SyncCommitteeContribution<E>;
    type Data = SyncContributionData;

    /// Create an empty collection with the given `initial_capacity`.
    fn new(initial_capacity: usize) -> Self {
        Self {
            map: HashMap::with_capacity(initial_capacity),
        }
    }

    /// Insert a sync committee contribution into `self`, aggregating it into the pool.
    ///
    /// The given sync contribution must only have one signature.
    fn insert(
        &mut self,
        contribution: &SyncCommitteeContribution<E>,
    ) -> Result<InsertOutcome, Error> {
        let _timer =
            metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CORE_INSERT);

        let set_bits = contribution
            .aggregation_bits
            .iter()
            .enumerate()
            .filter(|(_i, bit)| *bit)
            .map(|(i, _bit)| i)
            .collect::<Vec<_>>();

        let committee_index = set_bits
            .first()
            .copied()
            .ok_or(Error::NoAggregationBitsSet)?;

        if set_bits.len() > 1 {
            return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
        }

        let sync_data_root = SyncContributionData::from_contribution(contribution).tree_hash_root();

        if let Some(existing_contribution) = self.map.get_mut(&sync_data_root) {
            if existing_contribution
                .aggregation_bits
                .get(committee_index)
                .map_err(|_| Error::InconsistentBitfieldLengths)?
            {
                Ok(InsertOutcome::SignatureAlreadyKnown { committee_index })
            } else {
                let _timer = metrics::start_timer(
                    &metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_AGGREGATION,
                );
                existing_contribution.aggregate(contribution);
                Ok(InsertOutcome::SignatureAggregated { committee_index })
            }
        } else {
            if self.map.len() >= E::sync_committee_size() {
                return Err(Error::ReachedMaxItemsPerSlot(E::sync_committee_size()));
            }

            self.map.insert(sync_data_root, contribution.clone());
            Ok(InsertOutcome::NewItemInserted { committee_index })
        }
    }

    /// Returns an aggregated `SyncCommitteeContribution` with the given `data`, if any.
    ///
    /// The given `data.slot` must match the slot that `self` was initialized with.
    fn get(&self, data: &SyncContributionData) -> Option<SyncCommitteeContribution<E>> {
        self.map.get(&data.tree_hash_root()).cloned()
    }

    fn get_map(&self) -> &HashMap<SyncDataRoot, SyncCommitteeContribution<E>> {
        &self.map
    }

    /// Returns an aggregated `SyncCommitteeContribution` with the given `root`, if any.
    fn get_by_root(&self, root: &SyncDataRoot) -> Option<&SyncCommitteeContribution<E>> {
        self.map.get(root)
    }

    fn len(&self) -> usize {
        self.map.len()
    }

    fn start_insert_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_INSERT)
    }

    fn start_create_map_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_CREATE_MAP)
    }

    fn start_prune_timer() -> Option<metrics::HistogramTimer> {
        metrics::start_timer(&metrics::SYNC_CONTRIBUTION_PROCESSING_AGG_POOL_PRUNE)
    }

    /// Default to `SYNC_COMMITTEE_SUBNET_COUNT`.
    fn default_capacity() -> usize {
        SYNC_COMMITTEE_SUBNET_COUNT as usize
    }
}

/// A pool of `Attestation` or `SyncCommitteeContribution` that is specially designed to store
/// "unaggregated" messages from the naive aggregation scheme.
///
/// **The `NaiveAggregationPool` does not do any verification. It assumes that all `Attestation`
/// or `SyncCommitteeContribution` objects provided are valid.**
///
/// ## Details
///
/// The pool sorts the items by `slot`, then by `Data`.
///
/// As each item is added it is aggregated with any existing item with the same `Data`. Considering
/// that the pool only accepts attestations or sync contributions with a single
/// signature, there should only ever be a single aggregated `Attestation` for any given
/// `AttestationData` or a single `SyncCommitteeContribution` for any given `SyncContributionData`.
///
/// The pool has a capacity for `SLOTS_RETAINED` slots; when a new `slot` is
/// provided, the oldest slot is dropped and replaced with the new slot. The pool can also be
/// pruned by supplying a `current_slot`; all existing items with a slot lower than
/// `current_slot - SLOTS_RETAINED` will be removed and any future item with a slot lower
/// than that will also be refused. Pruning is done automatically based upon the items it
/// receives and it can be triggered manually.
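///
/// ## Example
///
/// A minimal usage sketch (illustrative, not from the codebase); it assumes `attestation` is a
/// valid, single-signature `Attestation<E>` obtained elsewhere:
///
/// ```ignore
/// let mut pool: NaiveAggregationPool<AggregatedAttestationMap<E>> =
///     NaiveAggregationPool::default();
///
/// // Aggregates into any existing `Attestation` with the same `AttestationData`.
/// pool.insert(&attestation)?;
///
/// // Fetch the current best aggregate for this `AttestationData`.
/// let aggregate = pool.get(&attestation.data);
/// ```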
pub struct NaiveAggregationPool<T: AggregateMap> {
    lowest_permissible_slot: Slot,
    maps: HashMap<Slot, T>,
}

impl<T: AggregateMap> Default for NaiveAggregationPool<T> {
    fn default() -> Self {
        Self {
            lowest_permissible_slot: Slot::new(0),
            maps: HashMap::new(),
        }
    }
}

impl<T: AggregateMap> NaiveAggregationPool<T> {
    /// Insert an item into `self`, aggregating it into the pool.
    ///
    /// The given item must only have one signature and have a
    /// `slot` that is not lower than `self.lowest_permissible_slot`.
    ///
    /// The pool may be pruned if the given item has a slot higher than any
    /// previously seen.
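    ///
    /// A sketch of the error handling a caller might do (illustrative only; the variants are
    /// the ones defined in `Error` above):
    ///
    /// ```ignore
    /// match pool.insert(&item) {
    ///     Ok(outcome) => { /* new item, duplicate signature, or aggregated signature */ }
    ///     Err(Error::SlotTooLow { slot, lowest_permissible_slot }) => {
    ///         // The item is older than the pool retains; it can safely be discarded.
    ///     }
    ///     Err(e) => { /* invalid aggregation bits, capacity reached, etc. */ }
    /// }
    /// ```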
    pub fn insert(&mut self, item: &T::Value) -> Result<InsertOutcome, Error> {
        let _timer = T::start_insert_timer();
        let slot = item.get_slot();
        let lowest_permissible_slot = self.lowest_permissible_slot;

        // Reject any items that are too old.
        if slot < lowest_permissible_slot {
            return Err(Error::SlotTooLow {
                slot,
                lowest_permissible_slot,
            });
        }

        let outcome = if let Some(map) = self.maps.get_mut(&slot) {
            map.insert(item)
        } else {
            let _timer = T::start_create_map_timer();
            // To avoid re-allocations, try and determine a rough initial capacity for the new
            // map by taking the mean length of all maps at earlier slots.
            let (count, sum) = self
                .maps
                .iter()
                // Only include slots that are less than the given slot in the average. This
                // should generally avoid including recent slots that are still "filling up".
                .filter(|(map_slot, _item)| **map_slot < slot)
                .map(|(_slot, map)| map.len())
                .fold((0, 0), |(count, sum), len| (count + 1, sum + len));

            // If there are no earlier maps, `checked_div` returns `None` and we fall back to
            // `T::default_capacity()`.
            let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);

            let mut aggregate_map = T::new(initial_capacity);
            let outcome = aggregate_map.insert(item);
            self.maps.insert(slot, aggregate_map);

            outcome
        };

        self.prune(slot);

        outcome
    }

    /// Returns the total number of items stored in `self`.
    pub fn num_items(&self) -> usize {
        self.maps.iter().map(|(_, map)| map.len()).sum()
    }

    /// Returns an aggregated `T::Value` with the given `T::Data`, if any.
    pub fn get(&self, data: &T::Data) -> Option<T::Value> {
        self.maps
            .get(&data.get_slot())
            .and_then(|map| map.get(data))
    }

    /// Returns an aggregated `T::Value` with the given `slot` and `root`, if any.
    pub fn get_by_slot_and_root(&self, slot: Slot, root: &T::Key) -> Option<T::Value> {
        self.maps
            .get(&slot)
            .and_then(|map| map.get_by_root(root).cloned())
    }

    /// Iterate all items in all slots of `self`.
    pub fn iter(&self) -> impl Iterator<Item = &T::Value> {
        self.maps
            .iter()
            .flat_map(|(_slot, map)| map.get_map().iter().map(|(_key, value)| value))
    }

    /// Removes any items with a slot lower than `current_slot - SLOTS_RETAINED` and bars any
    /// future items with a slot lower than that.
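    ///
    /// For example (illustrative, using the constants above): with `SLOTS_RETAINED == 3`,
    /// calling `prune(Slot::new(6))` sets the lowest permissible slot to `3` via
    /// `saturating_sub`, drops any maps below it, and then trims the oldest maps until at
    /// most `SLOTS_RETAINED` remain.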
    pub fn prune(&mut self, current_slot: Slot) {
        let _timer = T::start_prune_timer();

        let lowest_permissible_slot = current_slot.saturating_sub(Slot::from(SLOTS_RETAINED));

        // No need to prune if the lowest permissible slot has not changed and the number of
        // maps is within the maximum.
        if self.lowest_permissible_slot == lowest_permissible_slot
            && self.maps.len() <= SLOTS_RETAINED
        {
            return;
        }

        self.lowest_permissible_slot = lowest_permissible_slot;

        // Remove any maps that are definitely expired.
        self.maps
            .retain(|slot, _map| *slot >= lowest_permissible_slot);

        // If we have too many maps, remove the lowest slots to ensure we only have
        // `SLOTS_RETAINED` left.
        if self.maps.len() > SLOTS_RETAINED {
            let mut slots = self
                .maps
                .iter()
                .map(|(slot, _map)| *slot)
                .collect::<Vec<_>>();
            // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
            // negligible.
            slots.sort_unstable();
            slots
                .into_iter()
                .take(self.maps.len().saturating_sub(SLOTS_RETAINED))
                .for_each(|slot| {
                    self.maps.remove(&slot);
                })
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ssz_types::BitList;
    use store::BitVector;
    use types::{
        test_utils::{generate_deterministic_keypair, test_random_instance},
        Fork, Hash256, SyncCommitteeMessage,
    };

    type E = types::MainnetEthSpec;

    fn get_attestation(slot: Slot) -> Attestation<E> {
        let mut a: Attestation<E> = test_random_instance();
        a.data.slot = slot;
        a.aggregation_bits = BitList::with_capacity(4).expect("should create bitlist");
        a
    }

    fn get_sync_contribution(slot: Slot) -> SyncCommitteeContribution<E> {
        let mut a: SyncCommitteeContribution<E> = test_random_instance();
        a.slot = slot;
        a.aggregation_bits = BitVector::new();
        a
    }

    fn sign_attestation(a: &mut Attestation<E>, i: usize, genesis_validators_root: Hash256) {
        a.sign(
            &generate_deterministic_keypair(i).sk,
            i,
            &Fork::default(),
            genesis_validators_root,
            &E::default_spec(),
        )
        .expect("should sign attestation");
    }

    fn sign_sync_contribution(
        a: &mut SyncCommitteeContribution<E>,
        i: usize,
        genesis_validators_root: Hash256,
    ) {
        let sync_message = SyncCommitteeMessage::new::<E>(
            a.slot,
            a.beacon_block_root,
            i as u64,
            &generate_deterministic_keypair(i).sk,
            &Fork::default(),
            genesis_validators_root,
            &E::default_spec(),
        );
        let signed_contribution: SyncCommitteeContribution<E> =
            SyncCommitteeContribution::from_message(&sync_message, a.subcommittee_index, i)
                .unwrap();

        a.aggregate(&signed_contribution);
    }

    fn unset_attestation_bit(a: &mut Attestation<E>, i: usize) {
        a.aggregation_bits
            .set(i, false)
            .expect("should unset aggregation bit")
    }

    fn unset_sync_contribution_bit(a: &mut SyncCommitteeContribution<E>, i: usize) {
        a.aggregation_bits
            .set(i, false)
            .expect("should unset aggregation bit")
    }

    fn mutate_attestation_block_root(a: &mut Attestation<E>, block_root: Hash256) {
        a.data.beacon_block_root = block_root
    }

    fn mutate_attestation_slot(a: &mut Attestation<E>, slot: Slot) {
        a.data.slot = slot
    }

    fn attestation_block_root_comparator(a: &Attestation<E>, block_root: Hash256) -> bool {
        a.data.beacon_block_root == block_root
    }

    fn key_from_attestation(a: &Attestation<E>) -> AttestationData {
        a.data.clone()
    }

    fn mutate_sync_contribution_block_root(
        a: &mut SyncCommitteeContribution<E>,
        block_root: Hash256,
    ) {
        a.beacon_block_root = block_root
    }

    fn mutate_sync_contribution_slot(a: &mut SyncCommitteeContribution<E>, slot: Slot) {
        a.slot = slot
    }

    fn sync_contribution_block_root_comparator(
        a: &SyncCommitteeContribution<E>,
        block_root: Hash256,
    ) -> bool {
        a.beacon_block_root == block_root
    }

    fn key_from_sync_contribution(a: &SyncCommitteeContribution<E>) -> SyncContributionData {
        SyncContributionData::from_contribution(a)
    }

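    // The two aggregate map implementations share identical pool semantics, so the tests are
    // generated once via the macro below and instantiated for both map types (see the
    // `test_suite!` invocations at the bottom of this file).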
    macro_rules! test_suite {
        (
            $mod_name: ident,
            $get_method_name: ident,
            $sign_method_name: ident,
            $unset_method_name: ident,
            $block_root_mutator: ident,
            $slot_mutator: ident,
            $block_root_comparator: ident,
            $key_getter: ident,
            $map_type: ident,
            $item_limit: expr
        ) => {
            #[cfg(test)]
            mod $mod_name {
                use super::*;

                #[test]
                fn single_item() {
                    let mut a = $get_method_name(Slot::new(0));

                    let mut pool: NaiveAggregationPool<$map_type<E>> =
                        NaiveAggregationPool::default();

                    assert_eq!(
                        pool.insert(&a),
                        Err(Error::NoAggregationBitsSet),
                        "should not accept item without any signatures"
                    );

                    $sign_method_name(&mut a, 0, Hash256::random());

                    assert_eq!(
                        pool.insert(&a),
                        Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
                        "should accept new item"
                    );
                    assert_eq!(
                        pool.insert(&a),
                        Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }),
                        "should acknowledge duplicate signature"
                    );

                    let retrieved = pool
                        .get(&$key_getter(&a))
                        .expect("should not error while getting item");
                    assert_eq!(retrieved, a, "retrieved item should equal the one inserted");

                    $sign_method_name(&mut a, 1, Hash256::random());

                    assert_eq!(
                        pool.insert(&a),
                        Err(Error::MoreThanOneAggregationBitSet(2)),
                        "should not accept item with multiple signatures"
                    );
                }

                #[test]
                fn multiple_items() {
                    let mut a_0 = $get_method_name(Slot::new(0));
                    let mut a_1 = a_0.clone();

                    let genesis_validators_root = Hash256::random();
                    $sign_method_name(&mut a_0, 0, genesis_validators_root);
                    $sign_method_name(&mut a_1, 1, genesis_validators_root);

                    let mut pool: NaiveAggregationPool<$map_type<E>> =
                        NaiveAggregationPool::default();

                    assert_eq!(
                        pool.insert(&a_0),
                        Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
                        "should accept a_0"
                    );
                    assert_eq!(
                        pool.insert(&a_1),
                        Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }),
                        "should accept a_1"
                    );

                    let retrieved = pool
                        .get(&$key_getter(&a_0))
                        .expect("should not error while getting attestation");

                    let mut a_01 = a_0.clone();
                    a_01.aggregate(&a_1);

                    assert_eq!(retrieved, a_01, "retrieved item should be aggregated");

                    /*
                     * Throw different data in there and ensure it isn't aggregated
                     */

                    let mut a_different = a_0.clone();
                    let different_root = Hash256::from_low_u64_be(1337);
                    $unset_method_name(&mut a_different, 0);
                    $sign_method_name(&mut a_different, 2, genesis_validators_root);
                    assert!(!$block_root_comparator(&a_different, different_root));
                    $block_root_mutator(&mut a_different, different_root);

                    assert_eq!(
                        pool.insert(&a_different),
                        Ok(InsertOutcome::NewItemInserted { committee_index: 2 }),
                        "should accept a_different"
                    );

                    assert_eq!(
                        pool.get(&$key_getter(&a_0))
                            .expect("should not error while getting item"),
                        retrieved,
                        "should not have aggregated different items with different data"
                    );
                }

                #[test]
                fn auto_pruning_item() {
                    let mut base = $get_method_name(Slot::new(0));
                    $sign_method_name(&mut base, 0, Hash256::random());

                    let mut pool: NaiveAggregationPool<$map_type<E>> =
                        NaiveAggregationPool::default();

                    for i in 0..SLOTS_RETAINED * 2 {
                        let slot = Slot::from(i);
                        let mut a = base.clone();
                        $slot_mutator(&mut a, slot);

                        assert_eq!(
                            pool.insert(&a),
                            Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
                            "should accept new item"
                        );

                        if i < SLOTS_RETAINED {
                            let len = i + 1;
                            assert_eq!(pool.maps.len(), len, "the pool should have length {}", len);
                        } else {
                            assert_eq!(
                                pool.maps.len(),
                                SLOTS_RETAINED,
                                "the pool should have length SLOTS_RETAINED"
                            );

                            let mut pool_slots = pool
                                .maps
                                .iter()
                                .map(|(slot, _map)| *slot)
                                .collect::<Vec<_>>();

                            pool_slots.sort_unstable();

                            for (j, pool_slot) in pool_slots.iter().enumerate() {
                                let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64;
                                assert_eq!(
                                    *pool_slot, expected_slot,
                                    "the slot of the map should be {}",
                                    expected_slot
                                )
                            }
                        }
                    }
                }

                #[test]
                fn max_items() {
                    let mut base = $get_method_name(Slot::new(0));
                    $sign_method_name(&mut base, 0, Hash256::random());

                    let mut pool: NaiveAggregationPool<$map_type<E>> =
                        NaiveAggregationPool::default();

                    for i in 0..=$item_limit {
                        let mut a = base.clone();
                        $block_root_mutator(&mut a, Hash256::from_low_u64_be(i as u64));

                        if i < $item_limit {
                            assert_eq!(
                                pool.insert(&a),
                                Ok(InsertOutcome::NewItemInserted { committee_index: 0 }),
                                "should accept item below limit"
                            );
                        } else {
                            assert_eq!(
                                pool.insert(&a),
                                Err(Error::ReachedMaxItemsPerSlot($item_limit)),
                                "should not accept item above limit"
                            );
                        }
                    }
                }
            }
        };
    }

    test_suite! {
        attestation_tests,
        get_attestation,
        sign_attestation,
        unset_attestation_bit,
        mutate_attestation_block_root,
        mutate_attestation_slot,
        attestation_block_root_comparator,
        key_from_attestation,
        AggregatedAttestationMap,
        MAX_ATTESTATIONS_PER_SLOT
    }

    test_suite! {
        sync_contribution_tests,
        get_sync_contribution,
        sign_sync_contribution,
        unset_sync_contribution_bit,
        mutate_sync_contribution_block_root,
        mutate_sync_contribution_slot,
        sync_contribution_block_root_comparator,
        key_from_sync_contribution,
        SyncContributionAggregateMap,
        E::sync_committee_size()
    }
}