lighthouse/beacon_node/beacon_chain/tests/attestation_production.rs

#![cfg(not(debug_assertions))]

use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
use lazy_static::lazy_static;
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};

pub const VALIDATOR_COUNT: usize = 16;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}

/// This test builds a chain that is just long enough to finalize an epoch, then produces an
/// attestation at each slot from genesis through to three epochs past the head.
///
/// It checks the produced attestation against some locally computed values.
#[tokio::test]
async fn produces_attestations() {
    let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
    let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3;

    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[..].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    let chain = &harness.chain;

    // Test all valid committee indices for all slots in the chain.
    // for slot in 0..=current_slot.as_u64() + MainnetEthSpec::slots_per_epoch() * 3 {
    for slot in 0..=num_blocks_produced + additional_slots_tested {
        if slot > 0 && slot <= num_blocks_produced {
            harness.advance_slot();

            harness
                .extend_chain(
                    1,
                    BlockStrategy::OnCanonicalHead,
                    AttestationStrategy::AllValidators,
                )
                .await;
        }
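
        // Compute the expected attestation contents locally from the state at this slot so they
        // can be compared against what the chain produces.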
        let slot = Slot::from(slot);
        let mut state = chain
            .state_at_slot(slot, StateSkipConfig::WithStateRoots)
            .expect("should get state");
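
        // Beyond the head no new blocks are produced, so attestations for later slots should
        // still reference the block at the last produced slot.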
        let block_slot = if slot <= num_blocks_produced {
            slot
        } else {
            Slot::from(num_blocks_produced)
        };

        let blinded_block = chain
            .block_at_slot(block_slot, WhenSlotSkipped::Prev)
            .expect("should get block")
            .expect("block should not be skipped");
        let block_root = blinded_block.message().tree_hash_root();
        let block = chain
            .store
            .make_full_block(&block_root, blinded_block)
            .unwrap();
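
        // The attestation target is the block root at the start of the current epoch, or this
        // block itself when the state sits exactly on the epoch boundary.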
        let epoch_boundary_slot = state
            .current_epoch()
            .start_slot(MainnetEthSpec::slots_per_epoch());
        let target_root = if state.slot() == epoch_boundary_slot {
            block_root
        } else {
            *state
                .get_block_root(epoch_boundary_slot)
                .expect("should get target block root")
        };

        state
            .build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
            .unwrap();
        let committee_cache = state
            .committee_cache(RelativeEpoch::Current)
            .expect("should get committee_cache");

        let committee_count = committee_cache.committees_per_slot();

        for index in 0..committee_count {
            let committee_len = committee_cache
                .get_beacon_committee(slot, index)
                .expect("should get committee for slot")
                .committee
                .len();

            let attestation = chain
                .produce_unaggregated_attestation(slot, index)
                .expect("should produce attestation");

            let data = &attestation.data;

            assert_eq!(
                attestation.aggregation_bits.len(),
                committee_len,
                "bad committee len"
            );
            assert!(
                attestation.aggregation_bits.is_zero(),
                "some committee bits are set"
            );
            assert_eq!(
                attestation.signature,
                AggregateSignature::empty(),
                "bad signature"
            );
            assert_eq!(data.index, index, "bad index");
            assert_eq!(data.slot, slot, "bad slot");
            assert_eq!(data.beacon_block_root, block_root, "bad block root");
            assert_eq!(
                data.source,
                state.current_justified_checkpoint(),
                "bad source"
            );
            assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
            assert_eq!(data.target.root, target_root, "bad target root");
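
            // Wrap the block and verify its data availability so it can be added to the early
            // attester cache.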
            let block_wrapper: BlockWrapper<MainnetEthSpec> = Arc::new(block.clone()).into();
            let available_block = chain
                .data_availability_checker
                .try_check_availability(block_wrapper)
                .unwrap();
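
            // The early attester cache should return an attestation identical to the one
            // produced via the normal path above.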
            let early_attestation = {
                let proto_block = chain
                    .canonical_head
                    .fork_choice_read_lock()
                    .get_block(&block_root)
                    .unwrap();
                chain
                    .early_attester_cache
                    .add_head_block(
                        block_root,
                        available_block,
                        proto_block,
                        &state,
                        &chain.spec,
                    )
                    .unwrap();
                chain
                    .early_attester_cache
                    .try_attest(slot, index, &chain.spec)
                    .unwrap()
                    .unwrap()
            };

            assert_eq!(
                attestation, early_attestation,
                "early attester cache inconsistent"
            );
        }
    }
}

/// Ensures that the early attester cache won't create an attestation to a block in a later slot
/// than the one requested.
#[tokio::test]
async fn early_attester_cache_old_request() {
    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[..].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    harness.advance_slot();

    harness
        .extend_chain(
            2,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;

    let head = harness.chain.head_snapshot();
    assert_eq!(head.beacon_block.slot(), 2);

    let head_proto_block = harness
        .chain
        .canonical_head
        .fork_choice_read_lock()
        .get_block(&head.beacon_block_root)
        .unwrap();
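
    // Verify the head block's availability and prime the early attester cache with it.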
    let block_wrapper: BlockWrapper<MainnetEthSpec> = head.beacon_block.clone().into();
    let available_block = harness
        .chain
        .data_availability_checker
        .try_check_availability(block_wrapper)
        .unwrap();

    harness
        .chain
        .early_attester_cache
        .add_head_block(
            head.beacon_block_root,
            available_block,
            head_proto_block,
            &head.beacon_state,
            &harness.chain.spec,
        )
        .unwrap();
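
    // Request an attestation for the slot *before* the cached head; it must reference a block
    // no later than the requested slot.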
    let attest_slot = head.beacon_block.slot() - 1;
    let attestation = harness
        .chain
        .produce_unaggregated_attestation(attest_slot, 0)
        .unwrap();

    assert_eq!(attestation.data.slot, attest_slot);
    let attested_block = harness
        .chain
        .get_blinded_block(&attestation.data.beacon_block_root)
        .unwrap()
        .unwrap();
    assert_eq!(attested_block.slot(), attest_slot);
}