lighthouse/slasher/tests/proposer_slashings.rs
Michael Sproul 92d597ad23 Modularise slasher backend (#3443)
## Proposed Changes

Enable the slasher to use either of two database backends: MDBX (the default) or LMDB. The backend can be selected using `--slasher-backend={lmdb,mdbx}`.
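
For illustration only (these names are an assumption, not necessarily the crate's real definitions), the backend choice could be modelled as a small enum parsed from the flag's value; in the real crate the backends are additionally gated behind the `mdbx` and `lmdb` cargo features, as the `#![cfg(...)]` attribute in the test below shows:

```rust
// Hypothetical sketch of a backend selector for `--slasher-backend`.
use std::str::FromStr;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DatabaseBackend {
    Mdbx,
    Lmdb,
}

impl FromStr for DatabaseBackend {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "mdbx" => Ok(Self::Mdbx),
            "lmdb" => Ok(Self::Lmdb),
            other => Err(format!("unsupported slasher backend: {other}")),
        }
    }
}

fn main() {
    assert_eq!("mdbx".parse::<DatabaseBackend>(), Ok(DatabaseBackend::Mdbx));
    assert!("leveldb".parse::<DatabaseBackend>().is_err());
}
```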

## Additional Info

In order to abstract over the two libraries' different handling of database lifetimes I've used `Box::leak` to give the `Environment` type a `'static` lifetime. This was the only way I could think of to construct the self-referential struct `SlasherDB` in 100% safe code, where the `OpenDatabases` refers to the `Environment`. I think this is OK, as the `Environment` is expected to live for the life of the program, and both database engines leave the database in a consistent state after each write. The memory claimed for memory-mapping will be freed and appropriately flushed by the OS regardless of whether the `Environment` is actually dropped.
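
As a minimal sketch of the pattern (using placeholder types, not Lighthouse's actual `Environment`/`OpenDatabases`/`SlasherDB` definitions), leaking the environment gives it a `'static` lifetime, so a struct holding both the environment reference and handles borrowed from it is no longer self-referential from the borrow checker's point of view:

```rust
// Placeholder stand-in for a memory-mapped database environment.
struct Environment {
    name: String,
}

// Handles that borrow from the environment.
struct OpenDatabases<'env> {
    env: &'env Environment,
}

struct SlasherDb {
    // Both fields refer to the same leaked environment; because the lifetime is
    // 'static, no self-referential borrow is needed.
    env: &'static Environment,
    databases: OpenDatabases<'static>,
}

impl SlasherDb {
    fn open(name: String) -> Self {
        // Leak the environment: the allocation lives until process exit, which is
        // acceptable because the database stays open for the program's lifetime.
        let env: &'static Environment = Box::leak(Box::new(Environment { name }));
        let databases = OpenDatabases { env };
        SlasherDb { env, databases }
    }
}

fn main() {
    let db = SlasherDb::open("slasher".into());
    // Both handles point at the same leaked environment.
    println!("{} / {}", db.env.name, db.databases.env.name);
}
```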

We are now depending on `sigp` forks of both `libmdbx-rs` and `lmdb-rs`, to give us greater control over MDBX's OS support and the version of LMDB used.
2022-08-15 01:30:56 +00:00

#![cfg(any(feature = "mdbx", feature = "lmdb"))]

use logging::test_logger;
use slasher::{
    test_utils::{block as test_block, E},
    Config, Slasher,
};
use tempfile::tempdir;
use types::{Epoch, EthSpec};

#[test]
fn empty_pruning() {
    let tempdir = tempdir().unwrap();
    let config = Config::new(tempdir.path().into());
    let slasher = Slasher::<E>::open(config, test_logger()).unwrap();
    slasher.prune_database(Epoch::new(0)).unwrap();
}

#[test]
fn block_pruning() {
    let slots_per_epoch = E::slots_per_epoch();

    let tempdir = tempdir().unwrap();
    let mut config = Config::new(tempdir.path().into());
    config.chunk_size = 2;
    config.history_length = 2;

    let slasher = Slasher::<E>::open(config.clone(), test_logger()).unwrap();
    let current_epoch = Epoch::from(2 * config.history_length);

    // Pruning the empty database should be safe.
    slasher.prune_database(Epoch::new(0)).unwrap();
    slasher.prune_database(current_epoch).unwrap();

    // Add blocks in excess of the history length and prune them away.
    let proposer_index = 100_000; // high to check sorting by slot
    for slot in 1..=current_epoch.as_u64() * slots_per_epoch {
        slasher.accept_block_header(test_block(slot, proposer_index, 0));
    }
    slasher.process_queued(current_epoch).unwrap();
    slasher.prune_database(current_epoch).unwrap();

    // Add more conflicting blocks, and check that only the ones within the non-pruned
    // section are detected as slashable.
    for slot in 1..=current_epoch.as_u64() * slots_per_epoch {
        slasher.accept_block_header(test_block(slot, proposer_index, 1));
    }
    slasher.process_queued(current_epoch).unwrap();
    let proposer_slashings = slasher.get_proposer_slashings();

    // Check number of proposer slashings, accounting for single block in current epoch.
    assert_eq!(
        proposer_slashings.len(),
        (config.history_length - 1) * slots_per_epoch as usize + 1
    );

    // Check epochs of all slashings are from within range.
    assert!(proposer_slashings.iter().all(|slashing| slashing
        .signed_header_1
        .message
        .slot
        .epoch(slots_per_epoch)
        > current_epoch - config.history_length as u64));
}