use crate::InterchangeError;
use serde_derive::{Deserialize, Serialize};
use std::cmp::max;
use std::collections::{HashMap, HashSet};
use std::io;
use types::{Epoch, Hash256, PublicKeyBytes, Slot};

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct InterchangeMetadata {
    #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
    pub interchange_format_version: u64,
    pub genesis_validators_root: Hash256,
}

#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct InterchangeData {
    pub pubkey: PublicKeyBytes,
    pub signed_blocks: Vec<SignedBlock>,
    pub signed_attestations: Vec<SignedAttestation>,
}

#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct SignedBlock {
    #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
    pub slot: Slot,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub signing_root: Option<Hash256>,
}

#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct SignedAttestation {
    #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
    pub source_epoch: Epoch,
    #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
    pub target_epoch: Epoch,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub signing_root: Option<Hash256>,
}

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct Interchange {
    pub metadata: InterchangeMetadata,
    pub data: Vec<InterchangeData>,
}
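
// Illustrative only (not part of the original source): the JSON shape these types
// (de)serialize to, based on the serde attributes above. The quoted-u64 fields are
// strings in JSON, `signing_root` is omitted when `None`, and all values below are
// invented for the example.
//
// {
//   "metadata": {
//     "interchange_format_version": "5",
//     "genesis_validators_root": "0x0000000000000000000000000000000000000000000000000000000000000000"
//   },
//   "data": [
//     {
//       "pubkey": "0xb845089a1457f811bfbd1aa99e30dba674de64ea9e417d0a1e75e3b0a5638301bfbb2d950ba2adfcfe321ba8e2ebbd2a",
//       "signed_blocks": [
//         { "slot": "81952" }
//       ],
//       "signed_attestations": [
//         { "source_epoch": "2290", "target_epoch": "3007" }
//       ]
//     }
//   ]
// }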

impl Interchange {
    pub fn from_json_str(json: &str) -> Result<Self, serde_json::Error> {
        serde_json::from_str(json)
    }

    pub fn from_json_reader(mut reader: impl std::io::Read) -> Result<Self, io::Error> {
        // We read the entire file into memory first, as this is *a lot* faster than using
        // `serde_json::from_reader`. See https://github.com/serde-rs/json/issues/160
        let mut json_str = String::new();
        reader.read_to_string(&mut json_str)?;
        Ok(Interchange::from_json_str(&json_str)?)
    }

    pub fn write_to(&self, writer: impl std::io::Write) -> Result<(), serde_json::Error> {
        serde_json::to_writer(writer, self)
    }

    /// Do these two `Interchange`s contain the same data (ignoring ordering)?
    pub fn equiv(&self, other: &Self) -> bool {
        let self_set = self.data.iter().collect::<HashSet<_>>();
        let other_set = other.data.iter().collect::<HashSet<_>>();
        self.metadata == other.metadata && self_set == other_set
    }

    /// The number of entries in `data`.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Is the `data` part of the interchange completely empty?
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Minify an interchange by constructing a synthetic block & attestation for each validator.
    pub fn minify(&self) -> Result<Self, InterchangeError> {
        // Map from pubkey to optional max block and max attestation.
        let mut validator_data =
            HashMap::<PublicKeyBytes, (Option<SignedBlock>, Option<SignedAttestation>)>::new();

        for data in self.data.iter() {
            // Existing maximum attestation and maximum block.
            let (max_block, max_attestation) = validator_data
                .entry(data.pubkey)
                .or_insert_with(|| (None, None));

            // Find maximum source and target epochs.
            let max_source_epoch = data
                .signed_attestations
                .iter()
                .map(|attestation| attestation.source_epoch)
                .max();
            let max_target_epoch = data
                .signed_attestations
                .iter()
                .map(|attestation| attestation.target_epoch)
                .max();

            match (max_source_epoch, max_target_epoch) {
                (Some(source_epoch), Some(target_epoch)) => {
                    if let Some(prev_max) = max_attestation {
                        prev_max.source_epoch = max(prev_max.source_epoch, source_epoch);
                        prev_max.target_epoch = max(prev_max.target_epoch, target_epoch);
                    } else {
                        *max_attestation = Some(SignedAttestation {
                            source_epoch,
                            target_epoch,
                            signing_root: None,
                        });
                    }
                }
                (None, None) => {}
                _ => return Err(InterchangeError::MaxInconsistent),
            };

            // Find maximum block slot.
            let max_block_slot = data.signed_blocks.iter().map(|block| block.slot).max();

            if let Some(max_slot) = max_block_slot {
                if let Some(prev_max) = max_block {
                    prev_max.slot = max(prev_max.slot, max_slot);
                } else {
                    *max_block = Some(SignedBlock {
                        slot: max_slot,
                        signing_root: None,
                    });
                }
            }
        }

        let data = validator_data
            .into_iter()
            .map(|(pubkey, (maybe_block, maybe_att))| InterchangeData {
                pubkey,
                signed_blocks: maybe_block.into_iter().collect(),
                signed_attestations: maybe_att.into_iter().collect(),
            })
            .collect();

        Ok(Self {
            metadata: self.metadata.clone(),
            data,
        })
    }
}
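
// A minimal usage sketch added for illustration (not part of the original file): it builds a
// tiny interchange by hand and checks that `minify` collapses each validator's history into a
// single maximal block and attestation. `Hash256::zero`, `PublicKeyBytes::empty`, `Slot::new`
// and `Epoch::new` are assumed to be available from the imported `types`/BLS crates; all
// values are invented.
#[cfg(test)]
mod minify_sketch {
    use super::*;
    use types::{Epoch, Hash256, PublicKeyBytes, Slot};

    #[test]
    fn minify_keeps_only_the_maxima() {
        let interchange = Interchange {
            metadata: InterchangeMetadata {
                interchange_format_version: 5,
                genesis_validators_root: Hash256::zero(),
            },
            data: vec![InterchangeData {
                pubkey: PublicKeyBytes::empty(),
                signed_blocks: vec![
                    SignedBlock { slot: Slot::new(10), signing_root: None },
                    SignedBlock { slot: Slot::new(20), signing_root: None },
                ],
                signed_attestations: vec![
                    SignedAttestation {
                        source_epoch: Epoch::new(1),
                        target_epoch: Epoch::new(2),
                        signing_root: None,
                    },
                    SignedAttestation {
                        source_epoch: Epoch::new(3),
                        target_epoch: Epoch::new(4),
                        signing_root: None,
                    },
                ],
            }],
        };

        let minified = interchange.minify().unwrap();

        // Expect one synthetic block at the maximum slot, one synthetic attestation at the
        // maximum source/target epochs, and unchanged metadata.
        assert_eq!(minified.len(), 1);
        assert_eq!(
            minified.data[0].signed_blocks,
            vec![SignedBlock { slot: Slot::new(20), signing_root: None }]
        );
        assert_eq!(
            minified.data[0].signed_attestations,
            vec![SignedAttestation {
                source_epoch: Epoch::new(3),
                target_epoch: Epoch::new(4),
                signing_root: None,
            }]
        );
        assert_eq!(minified.metadata, interchange.metadata);
    }
}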