use crate::*;
use tempfile::{tempdir, TempDir};
use types::{
    test_utils::generate_deterministic_keypair, AttestationData, BeaconBlockHeader, Hash256,
    PublicKeyBytes,
};

pub const DEFAULT_VALIDATOR_INDEX: usize = 0;
pub const DEFAULT_DOMAIN: Hash256 = Hash256::zero();
pub const DEFAULT_GENESIS_VALIDATORS_ROOT: Hash256 = Hash256::zero();

/// Return the compressed public key of the deterministic keypair for `index`.
pub fn pubkey(index: usize) -> PublicKeyBytes {
    generate_deterministic_keypair(index).pk.compress()
}
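
// Illustrative sketch (not part of the original module): `generate_deterministic_keypair`
// is deterministic in `index`, so tests can refer to validators purely by index, e.g.
//
//     assert_eq!(pubkey(0), pubkey(0));
//     assert_ne!(pubkey(0), pubkey(1));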

/// A single test case: a message to check against the slashing protection database,
/// plus the expected result of that check.
pub struct Test<T> {
    pubkey: PublicKeyBytes,
    data: T,
    domain: Hash256,
    expected: Result<Safe, NotSafe>,
}

impl<T> Test<T> {
    pub fn single(data: T) -> Self {
        Self::with_pubkey(pubkey(DEFAULT_VALIDATOR_INDEX), data)
    }

    pub fn with_pubkey(pubkey: PublicKeyBytes, data: T) -> Self {
        Self {
            pubkey,
            data,
            domain: DEFAULT_DOMAIN,
            expected: Ok(Safe::Valid),
        }
    }

    pub fn with_domain(mut self, domain: Hash256) -> Self {
        self.domain = domain;
        self
    }

    pub fn expect_result(mut self, result: Result<Safe, NotSafe>) -> Self {
        self.expected = result;
        self
    }

    pub fn expect_invalid_att(self, error: InvalidAttestation) -> Self {
        self.expect_result(Err(NotSafe::InvalidAttestation(error)))
    }

    pub fn expect_invalid_block(self, error: InvalidBlock) -> Self {
        self.expect_result(Err(NotSafe::InvalidBlock(error)))
    }

    pub fn expect_same_data(self) -> Self {
        self.expect_result(Ok(Safe::SameData))
    }
}
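
// Illustrative sketch (not part of the original module): callers chain the builder methods
// above to describe one case. `attestation_data` stands in for a hypothetical
// `AttestationData` value constructed elsewhere in the test suite; without an `expect_*`
// call a case defaults to expecting `Ok(Safe::Valid)`.
//
//     let case: Test<AttestationData> = Test::single(attestation_data)
//         .with_domain(Hash256::zero())
//         .expect_same_data();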

/// A sequence of test cases to run against a single slashing protection database.
pub struct StreamTest<T> {
    /// Validators to register.
    pub registered_validators: Vec<PublicKeyBytes>,
    /// Vector of cases and the value expected when calling `check_and_insert_X`.
    pub cases: Vec<Test<T>>,
}

impl<T> Default for StreamTest<T> {
    fn default() -> Self {
        Self {
            registered_validators: vec![pubkey(DEFAULT_VALIDATOR_INDEX)],
            cases: vec![],
        }
    }
}

impl<T> StreamTest<T> {
    /// The number of test cases that are expected to pass processing successfully.
    fn num_expected_successes(&self) -> usize {
        self.cases
            .iter()
            .filter(|case| case.expected.is_ok())
            .count()
    }
}

impl StreamTest<AttestationData> {
    pub fn run(&self) {
        let dir = tempdir().unwrap();
        let slashing_db_file = dir.path().join("slashing_protection.sqlite");
        let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap();

        for pubkey in &self.registered_validators {
            slashing_db.register_validator(*pubkey).unwrap();
        }

        for (i, test) in self.cases.iter().enumerate() {
            assert_eq!(
                slashing_db.check_and_insert_attestation(&test.pubkey, &test.data, test.domain),
                test.expected,
                "attestation {} not processed as expected",
                i
            );
        }

        roundtrip_database(&dir, &slashing_db, self.num_expected_successes() == 0);
    }
}

impl StreamTest<BeaconBlockHeader> {
    pub fn run(&self) {
        let dir = tempdir().unwrap();
        let slashing_db_file = dir.path().join("slashing_protection.sqlite");
        let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap();

        for pubkey in &self.registered_validators {
            slashing_db.register_validator(*pubkey).unwrap();
        }

        for (i, test) in self.cases.iter().enumerate() {
            assert_eq!(
                slashing_db.check_and_insert_block_proposal(&test.pubkey, &test.data, test.domain),
                test.expected,
                "block {} not processed as expected",
                i
            );
        }

        roundtrip_database(&dir, &slashing_db, self.num_expected_successes() == 0);
    }
}
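
// Illustrative sketch (not part of the original module): assembling and running a stream
// test. `data_a` stands in for a hypothetical `AttestationData` value; the default
// `registered_validators` already contains `pubkey(DEFAULT_VALIDATOR_INDEX)`.
//
//     StreamTest {
//         cases: vec![
//             Test::single(data_a.clone()),
//             Test::single(data_a).expect_same_data(),
//         ],
//         ..StreamTest::default()
//     }
//     .run();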

// This function roundtrips the database, but applies minification in order to be compatible with
// the implicit minification done on import.
fn roundtrip_database(dir: &TempDir, db: &SlashingDatabase, is_empty: bool) {
    let exported = db
        .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT)
        .unwrap();
    let new_db =
        SlashingDatabase::create(&dir.path().join("roundtrip_slashing_protection.sqlite")).unwrap();
    new_db
        .import_interchange_info(exported.clone(), DEFAULT_GENESIS_VALIDATORS_ROOT)
        .unwrap();
    let reexported = new_db
        .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT)
        .unwrap();

    assert!(exported
        .minify()
        .unwrap()
        .equiv(&reexported.minify().unwrap()));
    assert_eq!(is_empty, exported.is_empty());
}