Make slashing protection import more resilient (#2598)

## Issue Addressed

Closes #2419

## Proposed Changes

Address a long-standing issue with the import of slashing protection data, where the import would fail because the data appeared slashable w.r.t. the existing database. Importing is now idempotent, and has no trouble with data that has been handed back and forth between different validator clients, or different implementations.
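For instance, the import command (shown in the book changes below) can now be run twice with the same file; the second run succeeds and leaves the database unchanged. The file name here is hypothetical:

```bash
# First import of an interchange file exported from another client.
lighthouse account validator slashing-protection import interchange.json

# Re-running the same import is now a harmless no-op rather than an error.
lighthouse account validator slashing-protection import interchange.json
```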

The implementation works by updating the high and low watermarks where necessary, without attempting to check whether the input is slashable w.r.t. itself or the existing database. This is a strengthening of the minification that we have applied by default since #2380, and is what Teku has been doing since the beginning.
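A minimal sketch of the watermark idea, using simplified standalone types rather than the real `SlashingDatabase`/`ValidatorSummary` API (see `import_interchange_record` in the diff below for the actual implementation):

```rust
/// Simplified stand-in for the per-validator summary (the real code uses
/// `Slot`/`Epoch` types and runs inside an SQLite transaction).
#[derive(Default)]
struct Watermarks {
    max_block_slot: Option<u64>,
    max_source_epoch: Option<u64>,
    max_target_epoch: Option<u64>,
}

impl Watermarks {
    /// Merge an interchange record, keeping only the maximum-slot block and
    /// the maximum source/target epochs. Stale or slashable input is simply
    /// ignored rather than rejected, which makes the merge idempotent.
    fn merge(&mut self, block_slots: &[u64], attestations: &[(u64, u64)]) {
        if let Some(&slot) = block_slots.iter().max() {
            if self.max_block_slot.map_or(true, |cur| slot > cur) {
                self.max_block_slot = Some(slot);
            }
        }
        if let Some(&(source, _)) = attestations.iter().max_by_key(|&&(s, _)| s) {
            self.max_source_epoch =
                Some(self.max_source_epoch.map_or(source, |cur| cur.max(source)));
        }
        if let Some(&(_, target)) = attestations.iter().max_by_key(|&&(_, t)| t) {
            self.max_target_epoch =
                Some(self.max_target_epoch.map_or(target, |cur| cur.max(target)));
        }
    }
}
```

Merging the same record twice leaves the watermarks unchanged, and merging records in any order converges to the same maxima, which is what makes the import idempotent and order-insensitive.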

## Additional Info

The only feature we lose by doing this is the ability to do non-minified imports of clock-drifted messages (cf. Prysm on Medalla). In theory, with the previous implementation we could import all the messages in the case of clock drift and be aware of the "gap" between the real present time and the messages signed in the far future. _However_, for attestations this is close to useless: the source epoch will advance as soon as justification occurs, which would force us to make attestations that are slashable with respect to our bogus attestation(s). E.g. if I sign an attestation 100=>200 when the current epoch is 101, then I won't be able to vote in any epochs prior to 101 becoming justified, because 101=>102, 101=>103, etc. are all surrounded by 100=>200. Seeing as signing attestations gets blocked almost immediately in this case regardless of our import behaviour, there's no point trying to handle it.

For blocks the situation is more hopeful due to the lack of surrounds, but losing block proposals from validators who by definition can't attest doesn't seem like an issue (the other block proposers can pick up the slack).
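To make the surround reasoning concrete, here is a sketch of the standard surround-vote predicate from the slashing conditions (illustrative only, not Lighthouse's exact code), applied to the 100=>200 example above:

```rust
/// `a` surrounds `b` iff a.source < b.source && b.target < a.target.
/// Attestations are represented as (source_epoch, target_epoch) pairs.
fn surrounds(a: (u64, u64), b: (u64, u64)) -> bool {
    a.0 < b.0 && b.1 < a.1
}

fn main() {
    let bogus = (100, 200); // clock-drifted attestation signed at epoch 101
    // Once justification advances the source past 100, every normal vote with
    // target below 200 is surrounded by the bogus attestation:
    assert!(surrounds(bogus, (101, 102)));
    assert!(surrounds(bogus, (101, 103)));
    // ...so signing is blocked almost immediately regardless of import policy.
}
```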
Commit 0a77d783a4 by Michael Sproul, 2021-10-13 01:49:51 +00:00 (parent 34d22b5920). 11 changed files with 343 additions and 171 deletions.


@@ -33,11 +33,10 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
            Arg::with_name(MINIFY_FLAG)
                .long(MINIFY_FLAG)
                .takes_value(true)
-               .default_value("true")
                .possible_values(&["false", "true"])
                .help(
-                   "Minify the input file before processing. This is *much* faster, \
-                    but will not detect slashable data in the input.",
+                   "Deprecated: Lighthouse no longer requires minification on import \
+                    because it always minifies",
                ),
        ),
    )
@@ -88,7 +87,7 @@ pub fn cli_run<T: EthSpec>(
    match matches.subcommand() {
        (IMPORT_CMD, Some(matches)) => {
            let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?;
-           let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?;
+           let minify: Option<bool> = clap_utils::parse_optional(matches, MINIFY_FLAG)?;
            let import_file = File::open(&import_filename).map_err(|e| {
                format!(
                    "Unable to open import file at {}: {:?}",
@@ -102,12 +101,17 @@ pub fn cli_run<T: EthSpec>(
                .map_err(|e| format!("Error parsing file for import: {:?}", e))?;
            eprintln!(" [done].");

-           if minify {
-               eprint!("Minifying input file for faster loading");
-               interchange = interchange
-                   .minify()
-                   .map_err(|e| format!("Minification failed: {:?}", e))?;
-               eprintln!(" [done].");
+           if let Some(minify) = minify {
+               eprintln!(
+                   "WARNING: --minify flag is deprecated and will be removed in a future release"
+               );
+               if minify {
+                   eprint!("Minifying input file for faster loading");
+                   interchange = interchange
+                       .minify()
+                       .map_err(|e| format!("Minification failed: {:?}", e))?;
+                   eprintln!(" [done].");
+               }
            }

            let slashing_protection_database =
@@ -120,14 +124,16 @@ pub fn cli_run<T: EthSpec>(
                })?;

            let display_slot = |slot: Option<Slot>| {
-               slot.map_or("none".to_string(), |slot| format!("{}", slot.as_u64()))
+               slot.map_or("none".to_string(), |slot| format!("slot {}", slot.as_u64()))
            };
            let display_epoch = |epoch: Option<Epoch>| {
-               epoch.map_or("?".to_string(), |epoch| format!("{}", epoch.as_u64()))
+               epoch.map_or("?".to_string(), |epoch| format!("epoch {}", epoch.as_u64()))
            };
            let display_attestation = |source, target| match (source, target) {
                (None, None) => "none".to_string(),
-               (source, target) => format!("{}=>{}", display_epoch(source), display_epoch(target)),
+               (source, target) => {
+                   format!("{} => {}", display_epoch(source), display_epoch(target))
+               }
            };

            match slashing_protection_database
@@ -140,18 +146,11 @@ pub fn cli_run<T: EthSpec>(
                    InterchangeImportOutcome::Success { pubkey, summary } => {
                        eprintln!("- {:?}", pubkey);
                        eprintln!(
-                           " - min block: {}",
-                           display_slot(summary.min_block_slot)
+                           " - latest block: {}",
+                           display_slot(summary.max_block_slot)
                        );
-                       eprintln!(
-                           " - min attestation: {}",
-                           display_attestation(
-                               summary.min_attestation_source,
-                               summary.min_attestation_target
-                           )
-                       );
                        eprintln!(
-                           " - max attestation: {}",
+                           " - latest attestation: {}",
                            display_attestation(
                                summary.max_attestation_source,
                                summary.max_attestation_target
@@ -168,18 +167,20 @@ pub fn cli_run<T: EthSpec>(
                    }
                }
                Err(InterchangeError::AtomicBatchAborted(outcomes)) => {
-                   eprintln!("ERROR, slashable data in input:");
+                   eprintln!("ERROR: import aborted due to one or more errors");
                    for outcome in &outcomes {
                        if let InterchangeImportOutcome::Failure { pubkey, error } = outcome {
                            eprintln!("- {:?}", pubkey);
                            eprintln!(" - error: {:?}", error);
                        }
                    }
-                   return Err(
-                       "ERROR: import aborted due to slashable data, see above.\n\
-                        Please see https://lighthouse-book.sigmaprime.io/slashing-protection.html#slashable-data-in-import\n\
-                        IT IS NOT SAFE TO START VALIDATING".to_string()
-                   );
+                   return Err("ERROR: import aborted due to errors, see above.\n\
+                               No data has been imported and the slashing protection \
+                               database is in the same state it was in before the import.\n\
+                               Due to the failed import it is NOT SAFE to start validating\n\
+                               with any newly imported validator keys, as your database lacks\n\
+                               slashing protection data for them."
+                       .to_string());
                }
                Err(e) => {
                    return Err(format!(
@@ -192,7 +193,7 @@ pub fn cli_run<T: EthSpec>(
            eprintln!("Import completed successfully.");
            eprintln!(
-               "Please double-check that the minimum and maximum blocks and attestations above \
+               "Please double-check that the latest blocks and attestations above \
                 match your expectations."
            );


@@ -75,7 +75,7 @@ Instructions for exporting your existing client's database are out of scope for
please check the other client's documentation for instructions.

When importing an interchange file, you still need to import the validator keystores themselves
-separately, using the instructions about [importing keystores into
+separately, using the instructions for [importing keystores into
Lighthouse](./validator-import-launchpad.md).

---
@@ -91,31 +91,24 @@ up to date.

[EIP-3076]: https://eips.ethereum.org/EIPS/eip-3076

+### How Import Works
+
+Since version 1.6.0 Lighthouse will ignore any slashable data in the import data and will safely
+update the low watermarks for blocks and attestations. It will store only the maximum-slot block
+for each validator, and the maximum source/target attestation. This is faster than importing
+all data while also being more resilient to repeated imports & stale data.
+
### Minification

-Since version 1.5.0 Lighthouse automatically _minifies_ slashing protection data upon import.
-Minification safely shrinks the input file, making it faster to import.
-
-If an import file contains slashable data, then its minification is still safe to import _even
-though_ the non-minified file would fail to be imported. This means that leaving minification
-enabled is recommended if the input could contain slashable data. Conversely, if you would like to
-double-check that the input file is not slashable with respect to itself, then you should disable
-minification.
-
-Minification can be disabled for imports by adding `--minify=false` to the command:
-
-```
-lighthouse account validator slashing-protection import --minify=false <my_interchange.json>
-```
-
-It can also be enabled for exports (disabled by default):
+The exporter can be configured to minify (shrink) the data it exports by keeping only the
+maximum-slot and maximum-epoch messages. Provide the `--minify=true` flag:

```
lighthouse account validator slashing-protection export --minify=true <lighthouse_interchange.json>
```

-Minifying the export file should make it faster to import, and may allow it to be imported into an
-implementation that is rejecting the non-minified equivalent due to slashable data.
+This may make the file faster to import into other clients, but is unnecessary for Lighthouse to
+Lighthouse transfers since v1.5.0.

## Troubleshooting
@@ -161,46 +154,6 @@ Sep 29 15:15:05.303 CRIT Not signing slashable attestation error: InvalidA
This log is still marked as `CRIT` because in general it should occur only very rarely,
and _could_ indicate a serious error or misconfiguration (see [Avoiding Slashing](#avoiding-slashing)).

-### Slashable Data in Import
-
-During import of an [interchange file](#import-and-export) if you receive an error about
-the file containing slashable data, then you must carefully consider whether you want to continue.
-
-There are several potential causes for this error, each of which require a different reaction. If
-the error output lists multiple validator keys, the cause could be different for each of them.
-
-1. Your validator has actually signed slashable data. If this is the case, you should assess
-   whether your validator has been slashed (or is likely to be slashed). It's up to you
-   whether you'd like to continue.
-2. You have exported data from Lighthouse to another client, and then back to Lighthouse,
-   _in a way that didn't preserve the signing roots_. A message with no signing roots
-   is considered slashable with respect to _any_ other message at the same slot/epoch,
-   so even if it was signed by Lighthouse originally, Lighthouse has no way of knowing this.
-   If you're sure you haven't run Lighthouse and the other client simultaneously, you
-   can [drop Lighthouse's DB in favour of the interchange file](#drop-and-re-import).
-3. You have imported the same interchange file (which lacks signing roots) twice, e.g. from Teku.
-   It might be safe to continue as-is, or you could consider a [Drop and
-   Re-import](#drop-and-re-import).
-
-If you are running the import command with `--minify=false`, you should consider enabling
-[minification](#minification).
-
-#### Drop and Re-import
-
-If you'd like to prioritize an interchange file over any existing database stored by Lighthouse
-then you can _move_ (not delete) Lighthouse's database and replace it like so:
-
-```bash
-mv $datadir/validators/slashing_protection.sqlite ~/slashing_protection_backup.sqlite
-```
-
-```
-lighthouse account validator slashing-protection import <my_interchange.json>
-```
-
-If your interchange file doesn't cover all of your validators, you shouldn't do this. Please reach
-out on Discord if you need help.
-
## Limitation of Liability

The Lighthouse developers do not guarantee the perfect functioning of this software, or accept


@@ -1,4 +1,4 @@
-TESTS_TAG := v5.1.0
+TESTS_TAG := v5.2.0
GENERATE_DIR := generated-tests
OUTPUT_DIR := interchange-tests
TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz


@@ -216,7 +216,7 @@ fn main() {
            ],
        ),
        MultiTestCase::new(
-           "multiple_interchanges_single_validator_single_message_out_of_order",
+           "multiple_interchanges_single_validator_single_block_out_of_order",
            vec![
                TestCase::new(interchange(vec![(0, vec![40], vec![])])),
                TestCase::new(interchange(vec![(0, vec![20], vec![])]))
@@ -233,6 +233,131 @@ fn main() {
                    .with_blocks(vec![(0, 20, false), (0, 50, false)]),
            ],
        ),
+       MultiTestCase::new(
+           "multiple_interchanges_single_validator_single_att_out_of_order",
+           vec![
+               TestCase::new(interchange(vec![(0, vec![], vec![(12, 13)])])),
+               TestCase::new(interchange(vec![(0, vec![], vec![(10, 11)])]))
+                   .contains_slashable_data()
+                   .with_attestations(vec![
+                       (0, 10, 14, false),
+                       (0, 12, 13, false),
+                       (0, 12, 14, true),
+                       (0, 13, 15, true),
+                   ]),
+           ],
+       ),
+       MultiTestCase::new(
+           "multiple_interchanges_single_validator_second_surrounds_first",
+           vec![
+               TestCase::new(interchange(vec![(0, vec![], vec![(10, 20)])])),
+               TestCase::new(interchange(vec![(0, vec![], vec![(9, 21)])]))
+                   .contains_slashable_data()
+                   .with_attestations(vec![
+                       (0, 10, 20, false),
+                       (0, 10, 21, false),
+                       (0, 9, 21, false),
+                       (0, 9, 22, false),
+                       (0, 10, 22, true),
+                   ]),
+           ],
+       ),
+       MultiTestCase::new(
+           "multiple_interchanges_single_validator_first_surrounds_second",
+           vec![
+               TestCase::new(interchange(vec![(0, vec![], vec![(9, 21)])])),
+               TestCase::new(interchange(vec![(0, vec![], vec![(10, 20)])]))
+                   .contains_slashable_data()
+                   .with_attestations(vec![
+                       (0, 10, 20, false),
+                       (0, 10, 21, false),
+                       (0, 9, 21, false),
+                       (0, 9, 22, false),
+                       (0, 10, 22, true),
+                   ]),
+           ],
+       ),
+       MultiTestCase::new(
+           "multiple_interchanges_multiple_validators_repeat_idem",
+           vec![
+               TestCase::new(interchange(vec![
+                   (0, vec![2, 4, 6], vec![(0, 1), (1, 2)]),
+                   (1, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+               ])),
+               TestCase::new(interchange(vec![
+                   (0, vec![2, 4, 6], vec![(0, 1), (1, 2)]),
+                   (1, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+               ]))
+               .contains_slashable_data()
+               .with_blocks(vec![
+                   (0, 0, false),
+                   (0, 3, true),
+                   (0, 7, true),
+                   (0, 3, true),
+                   (1, 0, false),
+               ])
+               .with_attestations(vec![(0, 0, 4, false), (1, 0, 4, true)]),
+           ],
+       ),
+       MultiTestCase::new(
+           "multiple_interchanges_overlapping_validators_repeat_idem",
+           vec![
+               TestCase::new(interchange(vec![
+                   (0, vec![2, 4, 6], vec![(0, 1), (1, 2)]),
+                   (1, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+               ])),
+               TestCase::new(interchange(vec![
+                   (0, vec![2, 4, 6], vec![(0, 1), (1, 2)]),
+                   (2, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+               ]))
+               .contains_slashable_data(),
+               TestCase::new(interchange(vec![
+                   (1, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+                   (2, vec![8, 10, 12], vec![(0, 1), (0, 3)]),
+               ]))
+               .contains_slashable_data()
+               .with_attestations(vec![
+                   (0, 0, 4, false),
+                   (1, 1, 2, false),
+                   (2, 1, 2, false),
+               ]),
+           ],
+       ),
+       MultiTestCase::new(
+           "multiple_interchanges_overlapping_validators_merge_stale",
+           vec![
+               TestCase::new(interchange(vec![
+                   (0, vec![100], vec![(12, 13)]),
+                   (1, vec![101], vec![(12, 13)]),
+                   (2, vec![4], vec![(4, 5)]),
+               ])),
+               TestCase::new(interchange(vec![
+                   (0, vec![2], vec![(4, 5)]),
+                   (1, vec![3], vec![(3, 4)]),
+                   (2, vec![102], vec![(12, 13)]),
+               ]))
+               .contains_slashable_data()
+               .with_blocks(vec![
+                   (0, 100, false),
+                   (1, 101, false),
+                   (2, 102, false),
+                   (0, 103, true),
+                   (1, 104, true),
+                   (2, 105, true),
+               ])
+               .with_attestations(vec![
+                   (0, 12, 13, false),
+                   (0, 11, 14, false),
+                   (1, 12, 13, false),
+                   (1, 11, 14, false),
+                   (2, 12, 13, false),
+                   (2, 11, 14, false),
+                   (0, 12, 14, true),
+                   (1, 13, 14, true),
+                   (2, 13, 14, true),
+               ]),
+           ],
+       ),
        MultiTestCase::single(
            "single_validator_source_greater_than_target",
            TestCase::new(interchange(vec![(0, vec![], vec![(8, 7)])])).contains_slashable_data(),


@@ -119,7 +119,7 @@ impl Interchange {
                }
            }
            (None, None) => {}
-           _ => return Err(InterchangeError::MinAndMaxInconsistent),
+           _ => return Err(InterchangeError::MaxInconsistent),
        };

        // Find maximum block slot.


@@ -64,15 +64,15 @@ impl MultiTestCase {
        let slashing_db_file = dir.path().join("slashing_protection.sqlite");
        let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap();

-       // If minification is used, false positives are allowed, i.e. there may be some situations
-       // in which signing is safe but the minified file prevents it.
-       let allow_false_positives = minify;
+       // Now that we are using implicit minification on import, we must always allow
+       // false positives.
+       let allow_false_positives = true;

        for test_case in &self.steps {
-           // If the test case is marked as containing slashable data, then it is permissible
-           // that we fail to import the file, in which case execution of the whole test should
-           // be aborted.
-           let allow_import_failure = test_case.contains_slashable_data;
+           // If the test case is marked as containing slashable data, then the spec allows us to
+           // fail to import the file. However, we minify on import and ignore slashable data, so
+           // we should be capable of importing no matter what.
+           let allow_import_failure = false;

            let interchange = if minify {
                let minified = test_case.interchange.minify().unwrap();


@@ -35,6 +35,7 @@ pub enum NotSafe {
    IOError(ErrorKind),
    SQLError(String),
    SQLPoolError(String),
+   ConsistencyError,
}

/// The attestation or block is safe to sign, and will not cause the signer to be slashed.
@@ -54,7 +55,7 @@ pub struct SigningRoot(Hash256);

impl PartialEq for SigningRoot {
    fn eq(&self, other: &Self) -> bool {
-       !self.0.is_zero() && self.0 == other.0
+       !self.is_null() && self.0 == other.0
    }
}
@@ -71,18 +72,26 @@ impl Into<Hash256> for SigningRoot {
}

impl SigningRoot {
-   fn to_hash256(self) -> Hash256 {
+   fn is_null(&self) -> bool {
+       self.0.is_zero()
+   }
+
+   fn to_hash256_raw(self) -> Hash256 {
        self.into()
    }
+
+   fn to_hash256(self) -> Option<Hash256> {
+       Some(self.0).filter(|_| !self.is_null())
+   }
}

-/// Safely parse a `Hash256` from the given `column` of an SQLite `row`.
-fn hash256_from_row(column: usize, row: &rusqlite::Row) -> rusqlite::Result<Hash256> {
+/// Safely parse a `SigningRoot` from the given `column` of an SQLite `row`.
+fn signing_root_from_row(column: usize, row: &rusqlite::Row) -> rusqlite::Result<SigningRoot> {
    use rusqlite::{types::Type, Error};
    let bytes: Vec<u8> = row.get(column)?;
    if bytes.len() == 32 {
-       Ok(Hash256::from_slice(&bytes))
+       Ok(SigningRoot::from(Hash256::from_slice(&bytes)))
    } else {
        Err(Error::FromSqlConversionFailure(
            column,


@@ -1,4 +1,4 @@
-use crate::{hash256_from_row, SigningRoot};
+use crate::{signing_root_from_row, SigningRoot};
use types::{AttestationData, Epoch, Hash256, SignedRoot};

/// An attestation that has previously been signed.
@@ -56,7 +56,7 @@ impl SignedAttestation {
    pub fn from_row(row: &rusqlite::Row) -> rusqlite::Result<Self> {
        let source = row.get(0)?;
        let target = row.get(1)?;
-       let signing_root = hash256_from_row(2, row)?.into();
+       let signing_root = signing_root_from_row(2, row)?;
        Ok(SignedAttestation::new(source, target, signing_root))
    }
}


@@ -1,4 +1,4 @@
-use crate::{hash256_from_row, SigningRoot};
+use crate::{signing_root_from_row, SigningRoot};
use types::{BeaconBlockHeader, Hash256, SignedRoot, Slot};

/// A block that has previously been signed.
@@ -30,7 +30,7 @@ impl SignedBlock {
    /// Parse an SQLite row of `(slot, signing_root)`.
    pub fn from_row(row: &rusqlite::Row) -> rusqlite::Result<Self> {
        let slot = row.get(0)?;
-       let signing_root = hash256_from_row(1, row)?.into();
+       let signing_root = signing_root_from_row(1, row)?;
        Ok(SignedBlock { slot, signing_root })
    }
}


@@ -4,7 +4,7 @@ use crate::interchange::{
};
use crate::signed_attestation::InvalidAttestation;
use crate::signed_block::InvalidBlock;
-use crate::{hash256_from_row, NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot};
+use crate::{signing_root_from_row, NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot};
use filesystem::restrict_file_permissions;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{params, OptionalExtension, Transaction, TransactionBehavior};
@@ -403,7 +403,7 @@ impl SlashingDatabase {
        txn.execute(
            "INSERT INTO signed_blocks (validator_id, slot, signing_root)
             VALUES (?1, ?2, ?3)",
-           params![validator_id, slot, signing_root.to_hash256().as_bytes()],
+           params![validator_id, slot, signing_root.to_hash256_raw().as_bytes()],
        )?;
        Ok(())
    }
@@ -429,7 +429,7 @@ impl SlashingDatabase {
                validator_id,
                att_source_epoch,
                att_target_epoch,
-               att_signing_root.to_hash256().as_bytes()
+               att_signing_root.to_hash256_raw().as_bytes()
            ],
        )?;
        Ok(())
@@ -612,65 +612,86 @@ impl SlashingDatabase {
    pub fn import_interchange_record(
        &self,
-       mut record: InterchangeData,
+       record: InterchangeData,
        txn: &Transaction,
    ) -> Result<ValidatorSummary, NotSafe> {
-       self.register_validators_in_txn(std::iter::once(&record.pubkey), txn)?;
+       let pubkey = &record.pubkey;
+
+       self.register_validators_in_txn(std::iter::once(pubkey), txn)?;
+
+       // Summary of minimum and maximum messages pre-import.
+       let prev_summary = self.validator_summary(pubkey, txn)?;

-       // Insert all signed blocks, sorting them so that the minimum bounds are not
-       // violated by blocks earlier in the file.
-       record.signed_blocks.sort_unstable_by_key(|b| b.slot);
-       for block in &record.signed_blocks {
-           self.check_and_insert_block_signing_root_txn(
-               &record.pubkey,
-               block.slot,
-               block
-                   .signing_root
-                   .map(SigningRoot::from)
-                   .unwrap_or_default(),
-               txn,
-           )?;
-       }
-
-       // Prune blocks less than the min slot from this interchange file.
-       // This ensures we don't sign anything less than the min slot after successful import,
-       // which is significant if we have imported two files with a "gap" in between.
-       if let Some(new_min_slot) = record.signed_blocks.iter().map(|block| block.slot).min() {
-           self.prune_signed_blocks(&record.pubkey, new_min_slot, txn)?;
-       }
-
-       // Insert all signed attestations.
-       record
-           .signed_attestations
-           .sort_unstable_by_key(|att| (att.source_epoch, att.target_epoch));
-       for attestation in &record.signed_attestations {
-           self.check_and_insert_attestation_signing_root_txn(
-               &record.pubkey,
-               attestation.source_epoch,
-               attestation.target_epoch,
-               attestation
-                   .signing_root
-                   .map(SigningRoot::from)
-                   .unwrap_or_default(),
-               txn,
-           )?;
-       }
-
-       // Prune attestations less than the min target from this interchange file.
-       // See the rationale for blocks above, and the doc comment for `prune_signed_attestations`
-       // for why we don't need to separately prune for the min source.
-       if let Some(new_min_target) = record
-           .signed_attestations
-           .iter()
-           .map(|attestation| attestation.target_epoch)
-           .min()
-       {
-           self.prune_signed_attestations(&record.pubkey, new_min_target, txn)?;
-       }
+       // If the interchange contains a new maximum slot block, import it.
+       let max_block = record.signed_blocks.iter().max_by_key(|b| b.slot);
+       if let Some(max_block) = max_block {
+           // Block is relevant if there are no previous blocks, or new block has slot greater than
+           // previous maximum.
+           if prev_summary
+               .max_block_slot
+               .map_or(true, |max_block_slot| max_block.slot > max_block_slot)
+           {
+               self.insert_block_proposal(
+                   txn,
+                   pubkey,
+                   max_block.slot,
+                   max_block
+                       .signing_root
+                       .map(SigningRoot::from)
+                       .unwrap_or_default(),
+               )?;
+
+               // Prune the database so that it contains *only* the new block.
+               self.prune_signed_blocks(&record.pubkey, max_block.slot, txn)?;
+           }
+       }
+
+       // Find the attestations with max source and max target. Unless the input contains slashable
+       // data these two attestations should be identical, but we also handle the case where they
+       // are not.
+       let max_source_attestation = record
+           .signed_attestations
+           .iter()
+           .max_by_key(|att| att.source_epoch);
+       let max_target_attestation = record
+           .signed_attestations
+           .iter()
+           .max_by_key(|att| att.target_epoch);
+
+       if let (Some(max_source_att), Some(max_target_att)) =
+           (max_source_attestation, max_target_attestation)
+       {
+           let source_epoch = max_or(
+               prev_summary.max_attestation_source,
+               max_source_att.source_epoch,
+           );
+           let target_epoch = max_or(
+               prev_summary.max_attestation_target,
+               max_target_att.target_epoch,
+           );
+           let signing_root = SigningRoot::default();
+
+           // Clear existing attestations before insert to avoid running afoul of the target epoch
+           // uniqueness constraint.
+           self.clear_signed_attestations(pubkey, txn)?;
+           self.insert_attestation(txn, pubkey, source_epoch, target_epoch, signing_root)?;
+       }

        let summary = self.validator_summary(&record.pubkey, txn)?;
-       Ok(summary)
+
+       // Check that the summary is consistent with having added the new data.
+       if summary.check_block_consistency(&prev_summary, !record.signed_blocks.is_empty())
+           && summary.check_attestation_consistency(
+               &prev_summary,
+               !record.signed_attestations.is_empty(),
+           )
+       {
+           Ok(summary)
+       } else {
+           // This should never occur and is indicative of a bug in the import code.
+           Err(NotSafe::ConsistencyError)
+       }
    }

    pub fn export_interchange_info(
@@ -695,7 +716,7 @@ impl SlashingDatabase {
            .query_and_then(params![], |row| {
                let validator_pubkey: String = row.get(0)?;
                let slot = row.get(1)?;
-               let signing_root = Some(hash256_from_row(2, row)?);
+               let signing_root = signing_root_from_row(2, row)?.to_hash256();
                let signed_block = InterchangeBlock { slot, signing_root };
                data.entry(validator_pubkey)
                    .or_insert_with(|| (vec![], vec![]))
@@ -715,7 +736,7 @@ impl SlashingDatabase {
                let validator_pubkey: String = row.get(0)?;
                let source_epoch = row.get(1)?;
                let target_epoch = row.get(2)?;
-               let signing_root = Some(hash256_from_row(3, row)?);
+               let signing_root = signing_root_from_row(3, row)?.to_hash256();
                let signed_attestation = InterchangeAttestation {
                    source_epoch,
                    target_epoch,
@@ -786,12 +807,6 @@ impl SlashingDatabase {
    /// Remove all attestations for `public_key` with `target < new_min_target`.
    ///
-   /// Pruning every attestation with target less than `new_min_target` also has the effect of
-   /// making the new minimum source the source of the attestation with `target == new_min_target`
-   /// (if any exists). This is exactly what's required for pruning after importing an interchange
-   /// file, whereby we want to update the new minimum source to the min source from the
-   /// interchange.
-   ///
    /// If the `new_min_target` was plucked out of thin air and doesn't necessarily correspond to
    /// an extant attestation then this function is still safe. It will never delete *all* the
    /// attestations in the database.
@@ -803,7 +818,7 @@ impl SlashingDatabase {
    ) -> Result<(), NotSafe> {
        let validator_id = self.get_validator_id_in_txn(txn, public_key)?;

-       // The following holds:
+       // The following holds, because we never store mutually slashable attestations:
        //   a.target < new_min_target --> a.source <= new_min_source
        //
        // The `MAX(target_epoch)` acts as a guard to prevent accidentally clearing the DB.
@@ -821,6 +836,25 @@ impl SlashingDatabase {
        Ok(())
    }

+   /// Remove all attestations signed by a given `public_key`.
+   ///
+   /// This function is incredibly dangerous and should be used with extreme caution. Presently
+   /// we only use it in one place: immediately before inserting a new maximum source/maximum
+   /// target attestation. Any future use should take care to respect the database's non-emptiness.
+   fn clear_signed_attestations(
+       &self,
+       public_key: &PublicKeyBytes,
+       txn: &Transaction,
+   ) -> Result<(), NotSafe> {
+       let validator_id = self.get_validator_id_in_txn(txn, public_key)?;
+       txn.execute(
+           "DELETE FROM signed_attestations WHERE validator_id = ?1",
+           params![validator_id],
+       )?;
+       Ok(())
+   }
+
    /// Prune the signed attestations table for the given validator keys.
    pub fn prune_all_signed_attestations<'a>(
        &self,
@@ -844,7 +878,7 @@ impl SlashingDatabase {
        Ok(count)
    }

-   /// Get a summary of a validator's slashing protection data for consumption by the user.
+   /// Get a summary of a validator's slashing protection data including minimums and maximums.
    pub fn validator_summary(
        &self,
        public_key: &PublicKeyBytes,
@@ -896,6 +930,51 @@ pub struct ValidatorSummary {
    pub max_attestation_target: Option<Epoch>,
}

+impl ValidatorSummary {
+   fn check_block_consistency(&self, prev: &Self, imported_blocks: bool) -> bool {
+       if imported_blocks {
+           // Max block slot should be monotonically increasing and non-null.
+           // Minimum should match maximum due to pruning.
+           monotonic(self.max_block_slot, prev.max_block_slot)
+               && self.min_block_slot == self.max_block_slot
+       } else {
+           // Block slots should be unchanged.
+           prev.min_block_slot == self.min_block_slot && prev.max_block_slot == self.max_block_slot
+       }
+   }
+
+   fn check_attestation_consistency(&self, prev: &Self, imported_attestations: bool) -> bool {
+       if imported_attestations {
+           // Max source and target epochs should be monotonically increasing and non-null.
+           // Minimums should match maximums due to pruning.
+           monotonic(self.max_attestation_source, prev.max_attestation_source)
+               && monotonic(self.max_attestation_target, prev.max_attestation_target)
+               && self.min_attestation_source == self.max_attestation_source
+               && self.min_attestation_target == self.max_attestation_target
+       } else {
+           // Attestation epochs should be unchanged.
+           self.min_attestation_source == prev.min_attestation_source
+               && self.max_attestation_source == prev.max_attestation_source
+               && self.min_attestation_target == prev.min_attestation_target
+               && self.max_attestation_target == prev.max_attestation_target
+       }
+   }
+}
+
+/// Take the maximum of `opt_x` and `y`, returning `y` if `opt_x` is `None`.
+fn max_or<T: Copy + Ord>(opt_x: Option<T>, y: T) -> T {
+   opt_x.map_or(y, |x| std::cmp::max(x, y))
+}
+
+/// Check that `new` is `Some` and greater than or equal to `prev`.
+///
+/// If `prev` is `None` and `new` is `Some` then `true` is returned.
+fn monotonic<T: PartialOrd>(new: Option<T>, prev: Option<T>) -> bool {
+   new.map_or(false, |new_val| {
+       prev.map_or(true, |prev_val| new_val >= prev_val)
+   })
+}
+
/// The result of importing a single entry from an interchange file.
#[derive(Debug)]
pub enum InterchangeImportOutcome {
@@ -922,13 +1001,13 @@ pub enum InterchangeError {
        interchange_file: Hash256,
        client: Hash256,
    },
-   MinAndMaxInconsistent,
+   MaxInconsistent,
+   SummaryInconsistent,
    SQLError(String),
    SQLPoolError(r2d2::Error),
    SerdeJsonError(serde_json::Error),
    InvalidPubkey(String),
    NotSafe(NotSafe),
+   /// One or more records were found to be slashable, so the whole batch was aborted.
    AtomicBatchAborted(Vec<InterchangeImportOutcome>),
}


@@ -129,6 +129,8 @@ impl StreamTest<BeaconBlockHeader> {
    }
}

+// This function roundtrips the database, but applies minification in order to be compatible with
+// the implicit minification done on import.
fn roundtrip_database(dir: &TempDir, db: &SlashingDatabase, is_empty: bool) {
    let exported = db
        .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT)
@@ -142,6 +144,9 @@ fn roundtrip_database(dir: &TempDir, db: &SlashingDatabase, is_empty: bool) {
        .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT)
        .unwrap();

-   assert_eq!(exported, reexported);
+   assert!(exported
+       .minify()
+       .unwrap()
+       .equiv(&reexported.minify().unwrap()));
    assert_eq!(is_empty, exported.is_empty());
}