#![cfg(not(debug_assertions))]

use beacon_chain::{
    attestation_verification::Error as AttnError,
    test_utils::{
        AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
        OP_POOL_DB_KEY,
    },
    StateSkipConfig, WhenSlotSkipped,
};
use lazy_static::lazy_static;
use operation_pool::PersistedOperationPool;
use state_processing::{
    per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
};
use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
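
/// Builds a `BeaconChainHarness` over `MinimalEthSpec` with an ephemeral store, a mock execution
/// layer and the first `validator_count` cached keypairs, then advances the clock by one slot so
/// the harness is ready to produce its first block at slot 1.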
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
    let harness = BeaconChainHarness::builder(MinimalEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[0..validator_count].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    harness.advance_slot();

    harness
}
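
/// Runs `per_slot_processing` on the head state until it returns an error, checking that the
/// failure is `InsufficientValidators` (i.e. after a massive run of skipped slots the validators
/// have been slashed/leaked out of the active set).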
#[test]
fn massive_skips() {
    let harness = get_harness(8);
    let spec = &harness.chain.spec;
    let mut state = harness.chain.head().expect("should get head").beacon_state;

    // Run per_slot_processing until it returns an error.
    let error = loop {
        match per_slot_processing(&mut state, None, spec) {
            Ok(_) => continue,
            Err(e) => break e,
        }
    };

    assert!(state.slot() > 1, "the state should skip at least one slot");
    assert_eq!(
        error,
        SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError(
            BeaconStateError::InsufficientValidators
        )),
        "should return error indicating that validators have been slashed out"
    )
}
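
/// The forwards block-root and state-root iterators should yield one entry per slot from genesis
/// to the head, with slots increasing by one, and should finish at the current head block/state.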
#[test]
fn iterators() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        // No need to produce attestations for this test.
        AttestationStrategy::SomeValidators(vec![]),
    );

    let block_roots: Vec<(Hash256, Slot)> = harness
        .chain
        .forwards_iter_block_roots(Slot::new(0))
        .expect("should get iter")
        .map(Result::unwrap)
        .collect();
    let state_roots: Vec<(Hash256, Slot)> = harness
        .chain
        .forwards_iter_state_roots(Slot::new(0))
        .expect("should get iter")
        .map(Result::unwrap)
        .collect();

    assert_eq!(
        block_roots.len(),
        state_roots.len(),
        "should be an equal amount of block and state roots"
    );

    assert!(
        block_roots.iter().any(|(_root, slot)| *slot == 0),
        "should contain genesis block root"
    );
    assert!(
        state_roots.iter().any(|(_root, slot)| *slot == 0),
        "should contain genesis state root"
    );

    assert_eq!(
        block_roots.len(),
        num_blocks_produced as usize + 1,
        "should contain all produced blocks, plus the genesis block"
    );

    block_roots.windows(2).for_each(|x| {
        assert_eq!(
            x[1].1,
            x[0].1 + 1,
            "block root slots should be increasing by one"
        )
    });
    state_roots.windows(2).for_each(|x| {
        assert_eq!(
            x[1].1,
            x[0].1 + 1,
            "state root slots should be increasing by one"
        )
    });

    let head = &harness.chain.head().expect("should get head");

    assert_eq!(
        *block_roots.last().expect("should have some block roots"),
        (head.beacon_block_root, head.beacon_block.slot()),
        "last block root and slot should be for the head block"
    );

    assert_eq!(
        *state_roots.last().expect("should have some state roots"),
        (head.beacon_state_root(), head.beacon_state.slot()),
        "last state root and slot should be for the head state"
    );
}
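
/// `find_reorg_slot` should return the finalized slot when the compared chain has diverged by
/// more than `SLOTS_PER_HISTORICAL_ROOT`, the head slot when comparing the head against itself,
/// and the common slot when re-orging back by a single block.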
#[test]
fn find_reorgs() {
    let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        // No need to produce attestations for this test.
        AttestationStrategy::SomeValidators(vec![]),
    );

    let head_state = harness.chain.head_beacon_state().unwrap();
    let head_slot = head_state.slot();
    let genesis_state = harness
        .chain
        .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
        .unwrap();

    // Because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the
    // finalized slot.
    assert_eq!(
        harness
            .chain
            .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root)
            .unwrap(),
        head_state
            .finalized_checkpoint()
            .epoch
            .start_slot(MinimalEthSpec::slots_per_epoch())
    );

    // Test the head.
    assert_eq!(
        harness
            .chain
            .find_reorg_slot(
                &head_state,
                harness.chain.head_beacon_block().unwrap().canonical_root()
            )
            .unwrap(),
        head_slot
    );

    // Re-org back to the slot prior to the head.
    let prev_slot = head_slot - Slot::new(1);
    let prev_state = harness
        .chain
        .state_at_slot(prev_slot, StateSkipConfig::WithStateRoots)
        .unwrap();
    let prev_block_root = harness
        .chain
        .block_root_at_slot(prev_slot, WhenSlotSkipped::None)
        .unwrap()
        .unwrap();
    assert_eq!(
        harness
            .chain
            .find_reorg_slot(&prev_state, prev_block_root)
            .unwrap(),
        prev_slot
    );
}
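
/// Builds an initial chain that all validators attest to, then creates two forks by skipping a
/// block: fork choice should prefer the head built by the 2/3 honest majority over the fork built
/// by the faulty minority.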
#[test]
fn chooses_fork() {
    let harness = get_harness(VALIDATOR_COUNT);

    let two_thirds = (VALIDATOR_COUNT / 3) * 2;
    let delay = MinimalEthSpec::default_spec().min_attestation_inclusion_delay as usize;

    let honest_validators: Vec<usize> = (0..two_thirds).collect();
    let faulty_validators: Vec<usize> = (two_thirds..VALIDATOR_COUNT).collect();

    let initial_blocks = delay + 1;
    let honest_fork_blocks = delay + 1;
    let faulty_fork_blocks = delay + 2;

    // Build an initial chain where all validators agree.
    harness.extend_chain(
        initial_blocks,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
        &honest_validators,
        &faulty_validators,
        honest_fork_blocks,
        faulty_fork_blocks,
    );

    assert_ne!(honest_head, faulty_head, "forks should be distinct");

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        Slot::from(initial_blocks + honest_fork_blocks),
        "head should be at the current slot"
    );

    assert_eq!(
        harness
            .chain
            .head()
            .expect("should get head")
            .beacon_block_root,
        honest_head,
        "the honest chain should be the canonical chain"
    );
}
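
/// With every validator attesting, the chain should justify the previous epoch and finalize the
/// epoch before that (the usual two-epoch finality lag).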
#[test]
fn finalizes_with_full_participation() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        num_blocks_produced,
        "head should be at the current slot"
    );
    assert_eq!(
        state.current_epoch(),
        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
        "head should be at the expected epoch"
    );
    assert_eq!(
        state.current_justified_checkpoint().epoch,
        state.current_epoch() - 1,
        "the head should be justified one behind the current epoch"
    );
    assert_eq!(
        state.finalized_checkpoint().epoch,
        state.current_epoch() - 2,
        "the head should be finalized two behind the current epoch"
    );
}
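
/// With exactly 2/3 of validators attesting, justification and finalization still happen but lag
/// further behind, because `MIN_ATTESTATION_INCLUSION_DELAY` keeps some attestations out of the
/// epoch they target (see the note inside the test).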
#[test]
fn finalizes_with_two_thirds_participation() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;

    let harness = get_harness(VALIDATOR_COUNT);

    let two_thirds = (VALIDATOR_COUNT / 3) * 2;
    let attesters = (0..two_thirds).collect();

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::SomeValidators(attesters),
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        num_blocks_produced,
        "head should be at the current slot"
    );
    assert_eq!(
        state.current_epoch(),
        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
        "head should be at the expected epoch"
    );

    // Note: the 2/3rds tests are not justifying the immediately prior epochs because the
    // `MIN_ATTESTATION_INCLUSION_DELAY` is preventing an adequate number of attestations being
    // included in blocks during that epoch.

    assert_eq!(
        state.current_justified_checkpoint().epoch,
        state.current_epoch() - 2,
        "the head should be justified two behind the current epoch"
    );
    assert_eq!(
        state.finalized_checkpoint().epoch,
        state.current_epoch() - 4,
        "the head should be finalized four behind the current epoch"
    );
}
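
/// With fewer than 2/3 of validators attesting, no epoch should ever be justified or finalized.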
#[test]
fn does_not_finalize_with_less_than_two_thirds_participation() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;

    let harness = get_harness(VALIDATOR_COUNT);

    let two_thirds = (VALIDATOR_COUNT / 3) * 2;
    let less_than_two_thirds = two_thirds - 1;
    let attesters = (0..less_than_two_thirds).collect();

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::SomeValidators(attesters),
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        num_blocks_produced,
        "head should be at the current slot"
    );
    assert_eq!(
        state.current_epoch(),
        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
        "head should be at the expected epoch"
    );
    assert_eq!(
        state.current_justified_checkpoint().epoch,
        0,
        "no epoch should have been justified"
    );
    assert_eq!(
        state.finalized_checkpoint().epoch,
        0,
        "no epoch should have been finalized"
    );
}
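
/// With no attestations at all, the chain should keep producing blocks but never justify or
/// finalize an epoch.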
#[test]
fn does_not_finalize_without_attestation() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::SomeValidators(vec![]),
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        num_blocks_produced,
        "head should be at the current slot"
    );
    assert_eq!(
        state.current_epoch(),
        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
        "head should be at the expected epoch"
    );
    assert_eq!(
        state.current_justified_checkpoint().epoch,
        0,
        "no epoch should have been justified"
    );
    assert_eq!(
        state.finalized_checkpoint().epoch,
        0,
        "no epoch should have been finalized"
    );
}
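
/// The operation pool should survive a round-trip through the database: persist it, read the
/// `PersistedOperationPool` back via `OP_POOL_DB_KEY`, and check it matches the in-memory pool.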
|
2019-06-26 03:06:08 +00:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn roundtrip_operation_pool() {
|
|
|
|
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
|
|
|
|
|
2020-10-19 05:58:39 +00:00
|
|
|
let harness = get_harness(VALIDATOR_COUNT);
|
2019-06-26 03:06:08 +00:00
|
|
|
|
|
|
|
// Add some attestations
|
|
|
|
harness.extend_chain(
|
|
|
|
num_blocks_produced as usize,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
assert!(harness.chain.op_pool.num_attestations() > 0);
|
|
|
|
|
|
|
|
// TODO: could add some other operations
|
2020-03-06 05:09:41 +00:00
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.persist_op_pool()
|
|
|
|
.expect("should persist op pool");
|
2019-06-26 03:06:08 +00:00
|
|
|
|
2020-03-06 05:09:41 +00:00
|
|
|
let restored_op_pool = harness
|
|
|
|
.chain
|
|
|
|
.store
|
2020-09-30 02:36:07 +00:00
|
|
|
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&OP_POOL_DB_KEY)
|
2020-03-06 05:09:41 +00:00
|
|
|
.expect("should read db")
|
|
|
|
.expect("should find op pool")
|
2021-07-15 00:52:02 +00:00
|
|
|
.into_operation_pool()
|
|
|
|
.unwrap();
|
2019-06-26 03:06:08 +00:00
|
|
|
|
|
|
|
assert_eq!(harness.chain.op_pool, restored_op_pool);
|
|
|
|
}
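
/// After half an epoch of blocks with full attestation coverage, fork choice should hold a latest
/// message for every validator whose attestation duty has already occurred, and none for the
/// validators whose duty slot has not yet been reached.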
#[test]
fn unaggregated_attestations_added_to_fork_choice_some_none() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;
    let mut fork_choice = harness.chain.fork_choice.write();

    // Move forward a slot so all queued attestations can be processed.
    harness.advance_slot();
    fork_choice
        .update_time(harness.chain.slot().unwrap())
        .unwrap();

    let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
        .into_iter()
        .map(|validator_index| {
            let slot = state
                .get_attestation_duties(validator_index, RelativeEpoch::Current)
                .expect("should get attester duties")
                .unwrap()
                .slot;

            (validator_index, slot)
        })
        .collect();

    for (validator, slot) in validator_slots.clone() {
        let latest_message = fork_choice.latest_message(validator);

        if slot <= num_blocks_produced && slot != 0 {
            assert_eq!(
                latest_message.unwrap().1,
                slot.epoch(MinimalEthSpec::slots_per_epoch()),
                "Latest message epoch for {} should be equal to epoch {}.",
                validator,
                slot
            )
        } else {
            assert!(
                latest_message.is_none(),
                "Latest message slot should be None."
            )
        }
    }
}
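
/// Produces blocks without attestations, collects unaggregated attestations for each successive
/// head, and then verifies them for gossip: attestations older than an epoch (plus one slot)
/// relative to the current slot should be rejected with `AttnError::PastSlot`, while recent ones
/// should verify successfully.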
#[test]
fn attestations_with_increasing_slots() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;

    let harness = get_harness(VALIDATOR_COUNT);

    let mut attestations = vec![];

    for _ in 0..num_blocks_produced {
        harness.extend_chain(
            2,
            BlockStrategy::OnCanonicalHead,
            // Don't produce & include any attestations (we'll collect them later).
            AttestationStrategy::SomeValidators(vec![]),
        );

        let head = harness.chain.head().unwrap();
        let head_state_root = head.beacon_state_root();

        attestations.extend(harness.get_unaggregated_attestations(
            &AttestationStrategy::AllValidators,
            &head.beacon_state,
            head_state_root,
            head.beacon_block_root,
            head.beacon_block.slot(),
        ));

        harness.advance_slot();
    }

    for (attestation, subnet_id) in attestations.into_iter().flatten() {
        let res = harness
            .chain
            .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id));

        let current_slot = harness.chain.slot().expect("should get slot");
        let expected_attestation_slot = attestation.data.slot;
        let expected_earliest_permissible_slot =
            current_slot - MinimalEthSpec::slots_per_epoch() - 1;

        if expected_attestation_slot < expected_earliest_permissible_slot {
            assert!(matches!(
                res.err().unwrap(),
                AttnError::PastSlot {
                    attestation_slot,
                    earliest_permissible_slot,
                }
                if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
            ))
        } else {
            res.expect("should process attestation");
        }
    }
}
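
/// Like the `some_none` test above, but with enough blocks that every validator's attestation
/// duty has occurred: fork choice should hold a latest message for every validator, pointing at
/// the block root of its duty slot.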
#[test]
fn unaggregated_attestations_added_to_fork_choice_all_updated() {
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;

    let harness = get_harness(VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let state = &harness.chain.head().expect("should get head").beacon_state;
    let mut fork_choice = harness.chain.fork_choice.write();

    // Move forward a slot so all queued attestations can be processed.
    harness.advance_slot();
    fork_choice
        .update_time(harness.chain.slot().unwrap())
        .unwrap();

    let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
    let slots: Vec<Slot> = validators
        .iter()
        .map(|&v| {
            state
                .get_attestation_duties(v, RelativeEpoch::Current)
                .expect("should get attester duties")
                .unwrap()
                .slot
        })
        .collect();
    let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect();

    for (validator, slot) in validator_slots {
        let latest_message = fork_choice.latest_message(*validator);

        assert_eq!(
            latest_message.unwrap().1,
            slot.epoch(MinimalEthSpec::slots_per_epoch()),
            "Latest message epoch should be equal to the epoch of the attester duty."
        );

        if slot != num_blocks_produced {
            let block_root = state
                .get_block_root(slot)
                .expect("Should get block root at slot");

            assert_eq!(
                latest_message.unwrap().0,
                *block_root,
                "Latest message block root should be equal to block at slot."
            );
        }
    }
}
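
/// Builds two harnesses, advances both through `skip_slots` empty slots, produces a single block
/// on harness A, then imports that block into harness B and runs fork choice: both chains should
/// end up with the same head at slot `skip_slots + 1`.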
fn run_skip_slot_test(skip_slots: u64) {
    let num_validators = 8;
    let harness_a = get_harness(num_validators);
    let harness_b = get_harness(num_validators);

    for _ in 0..skip_slots {
        harness_a.advance_slot();
        harness_b.advance_slot();
    }

    harness_a.extend_chain(
        1,
        BlockStrategy::OnCanonicalHead,
        // No attestation required for test.
        AttestationStrategy::SomeValidators(vec![]),
    );

    assert_eq!(
        harness_a
            .chain
            .head()
            .expect("should get head")
            .beacon_block
            .slot(),
        Slot::new(skip_slots + 1)
    );
    assert_eq!(
        harness_b
            .chain
            .head()
            .expect("should get head")
            .beacon_block
            .slot(),
        Slot::new(0)
    );

    assert_eq!(
        harness_b
            .chain
            .process_block(
                harness_a
                    .chain
                    .head()
                    .expect("should get head")
                    .beacon_block
                    .clone(),
            )
            .unwrap(),
        harness_a
            .chain
            .head()
            .expect("should get head")
            .beacon_block_root
    );

    harness_b
        .chain
        .fork_choice()
        .expect("should run fork choice");

    assert_eq!(
        harness_b
            .chain
            .head()
            .expect("should get head")
            .beacon_block
            .slot(),
        Slot::new(skip_slots + 1)
    );
}

#[test]
fn produces_and_processes_with_genesis_skip_slots() {
    for i in 0..MinimalEthSpec::slots_per_epoch() * 4 {
        run_skip_slot_test(i)
    }
}
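
/// Exercises `block_root_at_slot` and `block_at_slot` across skipped slots, non-skipped slots and
/// future slots: `WhenSlotSkipped::None` returns `None` for any slot without a block, while
/// `WhenSlotSkipped::Prev` falls back to the most recent prior block. The chain is built long
/// enough that lookups must go beyond the state's `block_roots` array and hit the database.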
#[test]
fn block_roots_skip_slot_behaviour() {
    let harness = get_harness(VALIDATOR_COUNT);

    // Test should be longer than the block roots to ensure a DB lookup is triggered.
    let chain_length = harness
        .chain
        .head()
        .unwrap()
        .beacon_state
        .block_roots()
        .len() as u64
        * 3;

    let skipped_slots = [1, 6, 7, 10, chain_length];

    // Build a chain with some skip slots.
    for i in 1..=chain_length {
        if i > 1 {
            harness.advance_slot();
        }

        let slot = harness.chain.slot().unwrap().as_u64();

        if !skipped_slots.contains(&slot) {
            harness.extend_chain(
                1,
                BlockStrategy::OnCanonicalHead,
                AttestationStrategy::AllValidators,
            );
        }
    }

    let mut prev_unskipped_root = None;

    for target_slot in 0..=chain_length {
        if skipped_slots.contains(&target_slot) {
            /*
             * A skip slot
             */
            assert!(
                harness
                    .chain
                    .block_root_at_slot(target_slot.into(), WhenSlotSkipped::None)
                    .unwrap()
                    .is_none(),
                "WhenSlotSkipped::None should return None on a skip slot"
            );

            let skipped_root = harness
                .chain
                .block_root_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
                .unwrap()
                .expect("WhenSlotSkipped::Prev should always return Some");

            assert_eq!(
                skipped_root,
                prev_unskipped_root.expect("test is badly formed"),
                "WhenSlotSkipped::Prev should accurately return the prior skipped block"
            );

            let expected_block = harness.chain.get_block(&skipped_root).unwrap().unwrap();

            assert_eq!(
                harness
                    .chain
                    .block_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
                    .unwrap()
                    .unwrap(),
                expected_block,
            );

            assert!(
                harness
                    .chain
                    .block_at_slot(target_slot.into(), WhenSlotSkipped::None)
                    .unwrap()
                    .is_none(),
                "WhenSlotSkipped::None should return None on a skip slot"
            );
        } else {
            /*
             * Not a skip slot
             */
            let skips_none = harness
                .chain
                .block_root_at_slot(target_slot.into(), WhenSlotSkipped::None)
                .unwrap()
                .expect("WhenSlotSkipped::None should return Some for non-skipped block");
            let skips_prev = harness
                .chain
                .block_root_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
                .unwrap()
                .expect("WhenSlotSkipped::Prev should always return Some");
            assert_eq!(
                skips_none, skips_prev,
                "WhenSlotSkipped::None and WhenSlotSkipped::Prev should be equal on non-skipped slot"
            );

            let expected_block = harness.chain.get_block(&skips_prev).unwrap().unwrap();

            assert_eq!(
                harness
                    .chain
                    .block_at_slot(target_slot.into(), WhenSlotSkipped::Prev)
                    .unwrap()
                    .unwrap(),
                expected_block
            );

            assert_eq!(
                harness
                    .chain
                    .block_at_slot(target_slot.into(), WhenSlotSkipped::None)
                    .unwrap()
                    .unwrap(),
                expected_block
            );

            prev_unskipped_root = Some(skips_prev);
        }
    }

    /*
     * A future, non-existent slot.
     */

    let future_slot = harness.chain.slot().unwrap() + 1;
    assert_eq!(
        harness.chain.head().unwrap().beacon_block.slot(),
        future_slot - 2,
        "test precondition"
    );
    assert!(
        harness
            .chain
            .block_root_at_slot(future_slot, WhenSlotSkipped::None)
            .unwrap()
            .is_none(),
        "WhenSlotSkipped::None should return None on a future slot"
    );
    assert!(
        harness
            .chain
            .block_root_at_slot(future_slot, WhenSlotSkipped::Prev)
            .unwrap()
            .is_none(),
        "WhenSlotSkipped::Prev should return None on a future slot"
    );
}