Altair consensus changes and refactors (#2279)

## Proposed Changes

Implement the consensus changes necessary for the upcoming Altair hard fork.

## Additional Info

This is quite a heavy refactor, with pivotal types like the `BeaconState` and `BeaconBlock` changing from structs to enums. This ripples through the whole codebase with field accesses changing to methods, e.g. `state.slot` => `state.slot()`.


Co-authored-by: realbigsean <seananderson33@gmail.com>
This commit is contained in:
Michael Sproul 2021-07-09 06:15:32 +00:00
parent 89361573d4
commit b4689e20c6
271 changed files with 9652 additions and 8444 deletions

View File

@ -26,8 +26,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Get latest version of stable Rust
run: rustup update stable
- name: Get latest version of stable Rust
run: rustup update stable
- name: Check formatting with cargo fmt
@ -58,6 +56,18 @@ jobs:
run: choco install -y make
- name: Run tests in release
run: make test-release
beacon-chain-tests:
name: beacon-chain-tests
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Get latest version of stable Rust
run: rustup update stable
- name: Run beacon_chain tests for base hard fork
run: make test-beacon-chain-base
- name: Run beacon_chain tests for Altair hard fork
run: make test-beacon-chain-altair
debug-tests-ubuntu:
name: debug-tests-ubuntu
runs-on: ubuntu-latest

92
Cargo.lock generated
View File

@ -568,7 +568,7 @@ dependencies = [
"genesis",
"int_to_bytes",
"integer-sqrt",
"itertools 0.9.0",
"itertools 0.10.1",
"lazy_static",
"lighthouse_metrics",
"log",
@ -1031,7 +1031,7 @@ dependencies = [
"ansi_term 0.11.0",
"atty",
"bitflags",
"strsim",
"strsim 0.8.0",
"textwrap",
"unicode-width",
"vec_map",
@ -1426,6 +1426,41 @@ dependencies = [
"zeroize",
]
[[package]]
name = "darling"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f2c43f534ea4b0b049015d00269734195e6d3f0f6635cb692251aca6f9f8b3c"
dependencies = [
"darling_core",
"darling_macro",
]
[[package]]
name = "darling_core"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e91455b86830a1c21799d94524df0845183fa55bafd9aa137b01c7d1065fa36"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29b5acf0dea37a7f66f7b25d2c5e93fd46f8f6968b1a5d7a3e02e97768afc95a"
dependencies = [
"darling_core",
"quote",
"syn",
]
[[package]]
name = "darwin-libproc"
version = "0.1.2"
@ -1684,15 +1719,20 @@ dependencies = [
"bls",
"cached_tree_hash",
"compare_fields",
"compare_fields_derive",
"derivative",
"eth2_ssz",
"eth2_ssz_derive",
"ethereum-types 0.9.2",
"fs2",
"hex",
"parking_lot",
"rayon",
"serde",
"serde_derive",
"serde_repr",
"serde_yaml",
"snap",
"state_processing",
"swap_or_not_shuffle",
"tree_hash",
@ -2185,7 +2225,7 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
name = "fallback"
version = "0.1.0"
dependencies = [
"itertools 0.9.0",
"itertools 0.10.1",
]
[[package]]
@ -2954,6 +2994,12 @@ dependencies = [
"tokio-native-tls",
]
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
version = "0.2.3"
@ -4068,7 +4114,7 @@ dependencies = [
"hex",
"if-addrs",
"igd",
"itertools 0.9.0",
"itertools 0.10.1",
"lazy_static",
"lighthouse_metrics",
"logging",
@ -4321,6 +4367,7 @@ dependencies = [
name = "operation_pool"
version = "0.2.0"
dependencies = [
"beacon_chain",
"eth2_ssz",
"eth2_ssz_derive",
"int_to_bytes",
@ -6061,15 +6108,15 @@ name = "state_processing"
version = "0.2.0"
dependencies = [
"arbitrary",
"beacon_chain",
"bls",
"criterion",
"env_logger 0.8.4",
"eth2_hashing",
"eth2_ssz",
"eth2_ssz_types",
"int_to_bytes",
"integer-sqrt",
"itertools 0.9.0",
"itertools 0.10.1",
"lazy_static",
"log",
"merkle_proof",
@ -6078,6 +6125,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_yaml",
"smallvec",
"tree_hash",
"tree_hash_derive",
"types",
@ -6087,7 +6135,9 @@ dependencies = [
name = "state_transition_vectors"
version = "0.1.0"
dependencies = [
"beacon_chain",
"eth2_ssz",
"lazy_static",
"state_processing",
"types",
]
@ -6151,18 +6201,17 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0"
name = "store"
version = "0.2.0"
dependencies = [
"criterion",
"beacon_chain",
"db-key",
"directory",
"eth2_ssz",
"eth2_ssz_derive",
"itertools 0.9.0",
"itertools 0.10.1",
"lazy_static",
"leveldb",
"lighthouse_metrics",
"lru",
"parking_lot",
"rayon",
"serde",
"serde_derive",
"slog",
@ -6201,6 +6250,12 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strum"
version = "0.20.0"
@ -6234,6 +6289,19 @@ version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2"
[[package]]
name = "superstruct"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bf7f6700d7c135cf4e4900c2cfba9a12ecad1fdc45594aad48f6b344b2589a0"
dependencies = [
"darling",
"itertools 0.10.1",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "swap_or_not_shuffle"
version = "0.2.0"
@ -6757,7 +6825,7 @@ dependencies = [
name = "tree_hash"
version = "0.1.1"
dependencies = [
"criterion",
"beacon_chain",
"eth2_hashing",
"ethereum-types 0.9.2",
"lazy_static",
@ -6820,6 +6888,7 @@ name = "types"
version = "0.2.0"
dependencies = [
"arbitrary",
"beacon_chain",
"bls",
"cached_tree_hash",
"compare_fields",
@ -6834,9 +6903,11 @@ dependencies = [
"ethereum-types 0.9.2",
"hex",
"int_to_bytes",
"itertools 0.10.1",
"lazy_static",
"log",
"merkle_proof",
"parking_lot",
"rand 0.7.3",
"rand_xorshift",
"rayon",
@ -6849,6 +6920,7 @@ dependencies = [
"serde_utils",
"serde_yaml",
"slog",
"superstruct",
"swap_or_not_shuffle",
"tempfile",
"test_random_derive",

View File

@ -1,6 +1,7 @@
.PHONY: tests
EF_TESTS = "testing/ef_tests"
BEACON_CHAIN_CRATE = "beacon_node/beacon_chain"
STATE_TRANSITION_VECTORS = "testing/state_transition_vectors"
GIT_TAG := $(shell git describe --tags --candidates 1)
BIN_DIR = "bin"
@ -79,12 +80,12 @@ build-release-tarballs:
# Runs the full workspace tests in **release**, without downloading any additional
# test vectors.
test-release:
cargo test --all --release --exclude ef_tests
cargo test --workspace --release --exclude ef_tests --exclude beacon_chain
# Runs the full workspace tests in **debug**, without downloading any additional test
# vectors.
test-debug:
cargo test --all --exclude ef_tests
cargo test --workspace --exclude ef_tests --exclude beacon_chain
# Runs cargo-fmt (linter).
cargo-fmt:
@ -92,7 +93,7 @@ cargo-fmt:
# Typechecks benchmark code
check-benches:
cargo check --all --benches
cargo check --workspace --benches
# Typechecks consensus code *without* allowing deprecated legacy arithmetic
check-consensus:
@ -100,9 +101,17 @@ check-consensus:
# Runs only the ef-test vectors.
run-ef-tests:
rm -rf $(EF_TESTS)/.accessed_file_log.txt
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto"
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro"
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/eth2.0-spec-tests
# Run the tests in the `beacon_chain` crate.
test-beacon-chain: test-beacon-chain-base test-beacon-chain-altair
test-beacon-chain-%:
env FORK_NAME=$* cargo test --release --features fork_from_env --manifest-path=$(BEACON_CHAIN_CRATE)/Cargo.toml
# Runs only the tests/state_transition_vectors tests.
run-state-transition-tests:
@ -121,7 +130,7 @@ test-full: cargo-fmt test-release test-debug test-ef
# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
lint:
cargo clippy --all --tests -- \
cargo clippy --workspace --tests -- \
-D warnings \
-A clippy::from-over-into \
-A clippy::upper-case-acronyms \

View File

@ -116,7 +116,7 @@ async fn publish_voluntary_exit<E: EthSpec>(
.beacon_state::<E>()
.as_ref()
.expect("network should have valid genesis state")
.genesis_validators_root;
.genesis_validators_root();
// Verify that the beacon node and validator being exited are on the same network.
if genesis_data.genesis_validators_root != testnet_genesis_root {

View File

@ -77,7 +77,7 @@ pub fn cli_run<T: EthSpec>(
let genesis_validators_root = testnet_config
.beacon_state::<T>()
.map(|state: BeaconState<T>| state.genesis_validators_root)
.map(|state: BeaconState<T>| state.genesis_validators_root())
.map_err(|e| {
format!(
"Unable to get genesis state, has genesis occurred? Detail: {:?}",

View File

@ -9,9 +9,9 @@ default = ["participation_metrics"]
write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
participation_metrics = [] # Exposes validator participation metrics to Prometheus.
test_logger = [] # Print log output to stderr when running tests instead of dropping it
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
[dev-dependencies]
int_to_bytes = { path = "../../consensus/int_to_bytes" }
maplit = "1.0.2"
environment = { path = "../../lighthouse/environment" }
@ -45,6 +45,7 @@ eth1 = { path = "../eth1" }
futures = "0.3.7"
genesis = { path = "../genesis" }
integer-sqrt = "0.1.5"
int_to_bytes = { path = "../../consensus/int_to_bytes" }
rand = "0.7.3"
rand_core = "0.6.2"
proto_array = { path = "../../consensus/proto_array" }
@ -56,7 +57,7 @@ safe_arith = { path = "../../consensus/safe_arith" }
fork_choice = { path = "../../consensus/fork_choice" }
task_executor = { path = "../../common/task_executor" }
derivative = "2.1.1"
itertools = "0.9.0"
itertools = "0.10.0"
regex = "1.3.9"
exit-future = "0.2.0"
slasher = { path = "../../slasher" }

View File

@ -895,7 +895,7 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or(BeaconChainError::CanonicalHeadLockTimeout)
.map(|head| head.beacon_state.fork)?;
.map(|head| head.beacon_state.fork())?;
let signature_set = indexed_attestation_signature_set_from_pubkeys(
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
@ -1001,7 +1001,7 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or(BeaconChainError::CanonicalHeadLockTimeout)
.map(|head| head.beacon_state.fork)?;
.map(|head| head.beacon_state.fork())?;
let signature_sets = vec![
signed_aggregate_selection_proof_signature_set(

View File

@ -454,7 +454,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let old_block_root = snapshot.beacon_block_root;
// The earliest slot for which the two chains may have a common history.
let lowest_slot = std::cmp::min(new_state.slot, old_state.slot);
let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot());
// Create an iterator across `$state`, assuming that the block at `$state.slot` has the
// block root of `$block_root`.
@ -465,7 +465,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// in all the iterator wrapping.
macro_rules! aligned_roots_iter {
($state: ident, $block_root: ident) => {
std::iter::once(Ok(($state.slot, $block_root)))
std::iter::once(Ok(($state.slot(), $block_root)))
.chain($state.rev_iter_block_roots(&self.spec))
.skip_while(|result| {
result
@ -506,7 +506,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// We provide this potentially-inaccurate-but-safe information to avoid onerous
// database reads during times of deep reorgs.
Ok(old_state
.finalized_checkpoint
.finalized_checkpoint()
.epoch
.start_slot(T::EthSpec::slots_per_epoch()))
})
@ -526,7 +526,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state_root: Hash256,
state: &'a BeaconState<T::EthSpec>,
) -> impl Iterator<Item = Result<(Hash256, Slot), Error>> + 'a {
std::iter::once(Ok((state_root, state.slot)))
std::iter::once(Ok((state_root, state.slot())))
.chain(StateRootsIterator::new(self.store.clone(), state))
.map(|result| result.map_err(Into::into))
}
@ -570,7 +570,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let root = self.block_root_at_slot(request_slot, skips)?;
if let Some(block_root) = root {
Ok(self.store.get_item(&block_root)?)
Ok(self.store.get_block(&block_root)?)
} else {
Ok(None)
}
@ -661,7 +661,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let state = &head.beacon_state;
// Try find the root for the `request_slot`.
let request_root_opt = match state.slot.cmp(&request_slot) {
let request_root_opt = match state.slot().cmp(&request_slot) {
// It's always a skip slot if the head is less than the request slot, return early.
Ordering::Less => return Ok(Some(None)),
// The request slot is the head slot.
@ -836,11 +836,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
slot: head.beacon_block.slot(),
block_root: head.beacon_block_root,
state_root: head.beacon_state_root(),
current_justified_checkpoint: head.beacon_state.current_justified_checkpoint,
finalized_checkpoint: head.beacon_state.finalized_checkpoint,
fork: head.beacon_state.fork,
genesis_time: head.beacon_state.genesis_time,
genesis_validators_root: head.beacon_state.genesis_validators_root,
current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(),
finalized_checkpoint: head.beacon_state.finalized_checkpoint(),
fork: head.beacon_state.fork(),
genesis_time: head.beacon_state.genesis_time(),
genesis_validators_root: head.beacon_state.genesis_validators_root(),
proposer_shuffling_decision_root,
})
})
@ -868,23 +868,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<BeaconState<T::EthSpec>, Error> {
let head_state = self.head()?.beacon_state;
match slot.cmp(&head_state.slot) {
match slot.cmp(&head_state.slot()) {
Ordering::Equal => Ok(head_state),
Ordering::Greater => {
if slot > head_state.slot + T::EthSpec::slots_per_epoch() {
if slot > head_state.slot() + T::EthSpec::slots_per_epoch() {
warn!(
self.log,
"Skipping more than an epoch";
"head_slot" => head_state.slot,
"head_slot" => head_state.slot(),
"request_slot" => slot
)
}
let start_slot = head_state.slot;
let start_slot = head_state.slot();
let task_start = Instant::now();
let max_task_runtime = Duration::from_secs(self.spec.seconds_per_slot);
let head_state_slot = head_state.slot;
let head_state_slot = head_state.slot();
let mut state = head_state;
let skip_state_root = match config {
@ -892,7 +892,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()),
};
while state.slot < slot {
while state.slot() < slot {
// Do not allow and forward state skip that takes longer than the maximum task duration.
//
// This is a protection against nodes doing too much work when they're not synced
@ -1046,7 +1046,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state: &BeaconState<T::EthSpec>,
) -> Result<Option<Hash256>, Error> {
let iter = BlockRootsIterator::new(self.store.clone(), state);
let iter_with_head = std::iter::once(Ok((beacon_block_root, state.slot)))
let iter_with_head = std::iter::once(Ok((beacon_block_root, state.slot())))
.chain(iter)
.map(|result| result.map_err(|e| e.into()));
@ -1176,7 +1176,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<Attestation<T::EthSpec>, Error> {
let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
if state.slot > slot {
if state.slot() > slot {
return Err(Error::CannotAttestToFutureState);
} else if state.current_epoch() < epoch {
let mut_state = state.to_mut();
@ -1194,7 +1194,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let committee_len = state.get_beacon_committee(slot, index)?.committee.len();
let target_slot = epoch.start_slot(T::EthSpec::slots_per_epoch());
let target_root = if state.slot <= target_slot {
let target_root = if state.slot() <= target_slot {
beacon_block_root
} else {
*state.get_block_root(target_slot)?
@ -1206,7 +1206,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
slot,
index,
beacon_block_root,
source: state.current_justified_checkpoint,
source: state.current_justified_checkpoint(),
target: Checkpoint {
epoch,
root: target_root,
@ -1347,12 +1347,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If there's no eth1 chain then it's impossible to produce blocks and therefore
// useless to put things in the op pool.
if self.eth1_chain.is_some() {
let fork = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or(Error::CanonicalHeadLockTimeout)?
.beacon_state
.fork;
let fork =
self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?;
self.op_pool
.insert_attestation(
@ -1563,6 +1559,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.collect::<Vec<_>>();
for (i, block) in chain_segment.into_iter().enumerate() {
// Ensure the block is the correct structure for the fork at `block.slot()`.
if let Err(e) = block.fork_name(&self.spec) {
return ChainSegmentResult::Failed {
imported_blocks,
error: BlockError::InconsistentFork(e),
};
}
let block_root = get_block_root(&block);
if let Some((child_parent_root, child_slot)) = children.get(i) {
@ -1691,8 +1695,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
block: SignedBeaconBlock<T::EthSpec>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
let slot = block.message.slot;
let graffiti_string = block.message.body.graffiti.as_utf8_lossy();
let slot = block.slot();
let graffiti_string = block.message().body().graffiti().as_utf8_lossy();
match GossipVerifiedBlock::new(block, self) {
Ok(verified) => {
@ -1809,7 +1813,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Iterate through the attestations in the block and register them as an "observed
// attestation". This will stop us from propagating them on the gossip network.
for a in &signed_block.message.body.attestations {
for a in signed_block.message().body().attestations() {
match self
.observed_attestations
.write()
@ -1828,7 +1832,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If a slasher is configured, provide the attestations from the block.
if let Some(slasher) = self.slasher.as_ref() {
for attestation in &signed_block.message.body.attestations {
for attestation in signed_block.message().body().attestations() {
let committee =
state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
let indexed_attestation =
@ -1874,11 +1878,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Do not import a block that doesn't descend from the finalized root.
let signed_block =
check_block_is_finalized_descendant::<T, _>(signed_block, &fork_choice, &self.store)?;
let block = &signed_block.message;
let (block, block_signature) = signed_block.clone().deconstruct();
// compare the existing finalized checkpoint with the incoming block's finalized checkpoint
let old_finalized_checkpoint = fork_choice.finalized_checkpoint();
let new_finalized_checkpoint = state.finalized_checkpoint;
let new_finalized_checkpoint = state.finalized_checkpoint();
// Only perform the weak subjectivity check if it was configured.
if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint {
@ -1894,7 +1898,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.log,
"Weak subjectivity checkpoint verification failed while importing block!";
"block_root" => ?block_root,
"parent_root" => ?block.parent_root,
"parent_root" => ?block.parent_root(),
"old_finalized_epoch" => ?old_finalized_checkpoint.epoch,
"new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
"weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
@ -1916,7 +1920,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let _fork_choice_block_timer =
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
fork_choice
.on_block(current_slot, block, block_root, &state)
.on_block(current_slot, &block, block_root, &state)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
}
@ -1927,7 +1931,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let validator_monitor = self.validator_monitor.read();
// Register each attestation in the block with the fork choice service.
for attestation in &block.body.attestations[..] {
for attestation in block.body().attestations() {
let _fork_choice_attestation_timer =
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
@ -1947,26 +1951,26 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Only register this with the validator monitor when the block is sufficiently close to
// the current slot.
if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
+ block.slot.as_u64()
+ block.slot().as_u64()
>= current_slot.as_u64()
{
validator_monitor.register_attestation_in_block(
&indexed_attestation,
&block,
block.to_ref(),
&self.spec,
);
}
}
for exit in &block.body.voluntary_exits {
for exit in block.body().voluntary_exits() {
validator_monitor.register_block_voluntary_exit(&exit.message)
}
for slashing in &block.body.attester_slashings {
for slashing in block.body().attester_slashings() {
validator_monitor.register_block_attester_slashing(slashing)
}
for slashing in &block.body.proposer_slashings {
for slashing in block.body().proposer_slashings() {
validator_monitor.register_block_proposer_slashing(slashing)
}
@ -1974,7 +1978,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::observe(
&metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
block.body.attestations.len() as f64,
block.body().attestations().len() as f64,
);
let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);
@ -1984,11 +1988,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If the write fails, revert fork choice to the version from disk, else we can
// end up with blocks in fork choice that are missing from disk.
// See https://github.com/sigp/lighthouse/issues/2028
ops.push(StoreOp::PutBlock(
block_root,
Box::new(signed_block.clone()),
));
ops.push(StoreOp::PutState(block.state_root, &state));
ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block)));
ops.push(StoreOp::PutState(block.state_root(), &state));
let txn_lock = self.store.hot_db.begin_rw_transaction();
if let Err(e) = self.store.do_atomically(ops) {
@ -2024,11 +2025,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// about it.
metrics::observe_duration(
&metrics::BEACON_BLOCK_IMPORTED_SLOT_START_DELAY_TIME,
get_block_delay_ms(timestamp_now(), &signed_block.message, &self.slot_clock),
get_block_delay_ms(timestamp_now(), block.to_ref(), &self.slot_clock),
);
let parent_root = block.parent_root;
let slot = block.slot;
let parent_root = block.parent_root();
let slot = block.slot();
let signed_block = SignedBeaconBlock::from_block(block, block_signature);
self.snapshot_cache
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
@ -2167,10 +2169,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or(BlockProductionError::NoEth1ChainConnection)?;
// It is invalid to try to produce a block using a state from a future slot.
if state.slot > produce_at_slot {
if state.slot() > produce_at_slot {
return Err(BlockProductionError::StateSlotTooHigh {
produce_at_slot,
state_slot: state.slot,
state_slot: state.slot(),
});
}
@ -2183,16 +2185,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
let parent_root = if state.slot > 0 {
let parent_root = if state.slot() > 0 {
*state
.get_block_root(state.slot - 1)
.get_block_root(state.slot() - 1)
.map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?
} else {
state.latest_block_header.canonical_root()
state.latest_block_header().canonical_root()
};
let (proposer_slashings, attester_slashings) =
self.op_pool.get_slashings(&state, &self.spec);
let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state);
let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?;
let deposits = eth1_chain
@ -2206,8 +2207,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
for attestation in self.naive_aggregation_pool.read().iter() {
if let Err(e) = self.op_pool.insert_attestation(
attestation.clone(),
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
) {
// Don't stop block production if there's an error, just create a log.
@ -2250,13 +2251,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.into();
drop(attestation_packing_timer);
let mut block = SignedBeaconBlock {
message: BeaconBlock {
slot: state.slot,
proposer_index: state.get_beacon_proposer_index(state.slot, &self.spec)? as u64,
let slot = state.slot();
let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64;
let voluntary_exits = self.op_pool.get_voluntary_exits(&state, &self.spec).into();
let inner_block = match state {
BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase {
slot,
proposer_index,
parent_root,
state_root: Hash256::zero(),
body: BeaconBlockBody {
body: BeaconBlockBodyBase {
randao_reveal,
eth1_data,
graffiti,
@ -2264,13 +2269,35 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
attester_slashings: attester_slashings.into(),
attestations,
deposits,
voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(),
voluntary_exits,
},
},
// The block is not signed here, that is the task of a validator client.
signature: Signature::empty(),
}),
BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair {
slot,
proposer_index,
parent_root,
state_root: Hash256::zero(),
body: BeaconBlockBodyAltair {
randao_reveal,
eth1_data,
graffiti,
proposer_slashings: proposer_slashings.into(),
attester_slashings: attester_slashings.into(),
attestations,
deposits,
voluntary_exits,
// FIXME(altair): put a sync aggregate from the pool here (once implemented)
sync_aggregate: SyncAggregate::new(),
},
}),
};
let block = SignedBeaconBlock::from_block(
inner_block,
// The block is not signed here, that is the task of a validator client.
Signature::empty(),
);
let process_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_PROCESS_TIMES);
per_block_processing(
&mut state,
@ -2285,19 +2312,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let state_root = state.update_tree_hash_cache()?;
drop(state_root_timer);
block.message.state_root = state_root;
let (mut block, _) = block.deconstruct();
*block.state_root_mut() = state_root;
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
trace!(
self.log,
"Produced beacon block";
"parent" => %block.message.parent_root,
"attestations" => block.message.body.attestations.len(),
"slot" => block.message.slot
"parent" => %block.parent_root(),
"attestations" => block.body().attestations().len(),
"slot" => block.slot()
);
Ok((block.message, state))
Ok((block, state))
}
/// Execute the fork choice algorithm and enthrone the result as the canonical head.
@ -2403,16 +2431,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
debug!(
self.log,
"Head beacon block";
"justified_root" => %new_head.beacon_state.current_justified_checkpoint.root,
"justified_epoch" => new_head.beacon_state.current_justified_checkpoint.epoch,
"finalized_root" => %new_head.beacon_state.finalized_checkpoint.root,
"finalized_epoch" => new_head.beacon_state.finalized_checkpoint.epoch,
"justified_root" => %new_head.beacon_state.current_justified_checkpoint().root,
"justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch,
"finalized_root" => %new_head.beacon_state.finalized_checkpoint().root,
"finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch,
"root" => %beacon_block_root,
"slot" => new_head.beacon_block.slot(),
);
};
let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint;
let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint();
// It is an error to try to update to a head with a lesser finalized epoch.
if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch {
@ -2425,7 +2453,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch())
< new_head
.beacon_state
.slot
.slot()
.epoch(T::EthSpec::slots_per_epoch());
if is_epoch_transition || is_reorg {
@ -2438,7 +2466,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// These fields are used for server-sent events
let state_root = new_head.beacon_state_root();
let head_slot = new_head.beacon_state.slot;
let head_slot = new_head.beacon_state.slot();
let target_epoch_start_slot = new_head
.beacon_state
.current_epoch()
@ -2513,7 +2541,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// the reach of the new head's `state_roots` array.
let new_finalized_slot = head
.beacon_state
.finalized_checkpoint
.finalized_checkpoint()
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
let new_finalized_state_root = process_results(
@ -2592,7 +2620,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
beacon_block_root: Hash256,
state: &BeaconState<T::EthSpec>,
) -> Result<(), BeaconChainError> {
let finalized_checkpoint = state.finalized_checkpoint;
let finalized_checkpoint = state.finalized_checkpoint();
info!(self.log, "Verifying the configured weak subjectivity checkpoint"; "weak_subjectivity_epoch" => wss_checkpoint.epoch, "weak_subjectivity_root" => ?wss_checkpoint.root);
// If epochs match, simply compare roots.
if wss_checkpoint.epoch == finalized_checkpoint.epoch
@ -2653,7 +2681,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
new_finalized_state_root: Hash256,
) -> Result<(), Error> {
self.fork_choice.write().prune()?;
let new_finalized_checkpoint = head_state.finalized_checkpoint;
let new_finalized_checkpoint = head_state.finalized_checkpoint();
self.observed_block_producers.write().prune(
new_finalized_checkpoint
@ -2870,13 +2898,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}
/// Returns `true` if the given block root has not been processed.
pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
Ok(!self
.store
.item_exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?)
}
/// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
///
/// This could be a very expensive operation and should only be done in testing/analysis
@ -2984,9 +3005,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.get_state(&block.state_root(), Some(block.slot()))
.unwrap()
.unwrap();
finalized_blocks.insert(state.finalized_checkpoint.root);
justified_blocks.insert(state.current_justified_checkpoint.root);
justified_blocks.insert(state.previous_justified_checkpoint.root);
finalized_blocks.insert(state.finalized_checkpoint().root);
justified_blocks.insert(state.current_justified_checkpoint().root);
justified_blocks.insert(state.previous_justified_checkpoint().root);
}
if block_hash == canonical_head_hash {

View File

@ -10,10 +10,7 @@ use ssz_derive::{Decode, Encode};
use std::marker::PhantomData;
use std::sync::Arc;
use store::{Error as StoreError, HotColdDB, ItemStore};
use types::{
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock,
Slot,
};
use types::{BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot};
#[derive(Debug)]
pub enum Error {
@ -45,7 +42,7 @@ const MAX_BALANCE_CACHE_SIZE: usize = 4;
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
state
.validators
.validators()
.iter()
.map(|validator| {
if validator.is_active_at(state.current_epoch()) {
@ -91,7 +88,7 @@ impl BalancesCache {
}
let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
let epoch_boundary_root = if epoch_boundary_slot == state.slot {
let epoch_boundary_root = if epoch_boundary_slot == state.slot() {
block_root
} else {
// This call remains sensible as long as `state.block_roots` is larger than a single
@ -127,7 +124,7 @@ impl BalancesCache {
let mut prior_block_found = false;
for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
if slot < state.slot {
if slot < state.slot() {
if *state.get_block_root(slot)? != block_root {
prior_block_found = true;
break;
@ -208,7 +205,7 @@ where
anchor: &BeaconSnapshot<E>,
) -> Self {
let anchor_state = &anchor.beacon_state;
let mut anchor_block_header = anchor_state.latest_block_header.clone();
let mut anchor_block_header = anchor_state.latest_block_header().clone();
if anchor_block_header.state_root == Hash256::zero() {
anchor_block_header.state_root = anchor.beacon_state_root();
}
@ -223,9 +220,9 @@ where
Self {
store,
balances_cache: <_>::default(),
time: anchor_state.slot,
time: anchor_state.slot(),
justified_checkpoint,
justified_balances: anchor_state.balances.clone().into(),
justified_balances: anchor_state.balances().clone().into(),
finalized_checkpoint,
best_justified_checkpoint: justified_checkpoint,
_phantom: PhantomData,
@ -318,17 +315,20 @@ where
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let justified_block = self
.store
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
.get_block(&self.justified_checkpoint.root)
.map_err(Error::FailedToReadBlock)?
.ok_or(Error::MissingBlock(self.justified_checkpoint.root))?
.message;
.deconstruct()
.0;
// FIXME(altair): could remove clone with by-value `balances` accessor
self.justified_balances = self
.store
.get_state(&justified_block.state_root, Some(justified_block.slot))
.get_state(&justified_block.state_root(), Some(justified_block.slot()))
.map_err(Error::FailedToReadState)?
.ok_or(Error::MissingState(justified_block.state_root))?
.balances
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?
.balances()
.clone()
.into();
}

View File

@ -1,10 +1,9 @@
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode};
use types::{beacon_state::CloneConfig, BeaconState, EthSpec, Hash256, SignedBeaconBlock};
/// Represents some block and its associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head.
#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
#[derive(Clone, Serialize, PartialEq, Debug)]
pub struct BeaconSnapshot<E: EthSpec> {
pub beacon_block: SignedBeaconBlock<E>,
pub beacon_block_root: Hash256,
@ -31,7 +30,7 @@ impl<E: EthSpec> BeaconSnapshot<E> {
///
/// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`.
pub fn beacon_state_root(&self) -> Hash256 {
self.beacon_block.message.state_root
self.beacon_block.message().state_root()
}
/// Update all fields of the checkpoint.

View File

@ -71,8 +71,8 @@ use std::io::Write;
use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp};
use tree_hash::TreeHash;
use types::{
BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256,
PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256,
InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
};
/// Maximum block slot number. Block with slots bigger than this constant will NOT be processed.
@ -219,6 +219,12 @@ pub enum BlockError<T: EthSpec> {
///
/// The block is invalid and the peer is faulty.
WeakSubjectivityConflict,
/// The block has the wrong structure for the fork at `block.slot`.
///
/// ## Peer scoring
///
/// The block is invalid and the peer is faulty.
InconsistentFork(InconsistentFork),
}
impl<T: EthSpec> std::fmt::Display for BlockError<T> {
@ -477,6 +483,11 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
block: SignedBeaconBlock<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
block
.fork_name(&chain.spec)
.map_err(BlockError::InconsistentFork)?;
// Do not gossip or process blocks from future slots.
let present_slot_with_tolerance = chain
.slot_clock
@ -492,7 +503,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
let block_root = get_block_root(&block);
// Do not gossip a block from a finalized slot.
check_block_against_finalized_slot(&block.message, chain)?;
check_block_against_finalized_slot(block.message(), chain)?;
// Check if the block is already known. We know it is post-finalization, so it is
// sufficient to check the fork choice.
@ -509,12 +520,12 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
if chain
.observed_block_producers
.read()
.proposer_has_been_observed(&block.message)
.proposer_has_been_observed(block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
return Err(BlockError::RepeatProposal {
proposer: block.message.proposer_index,
slot: block.message.slot,
proposer: block.message().proposer_index(),
slot: block.slot(),
});
}
@ -563,7 +574,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
};
// Reject any block that exceeds our limit on skipped slots.
check_block_skip_slots(chain, parent_block.slot, &block.message)?;
check_block_skip_slots(chain, parent_block.slot, block.message())?;
// We assign to a variable instead of using `if let Some` directly to ensure we drop the
// write lock before trying to acquire it again in the `else` clause.
@ -607,17 +618,17 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
block_epoch,
proposer_shuffling_decision_block,
proposers,
state.fork,
state.fork(),
)?;
(proposer_index, state.fork, Some(parent), block)
(proposer_index, state.fork(), Some(parent), block)
};
let signature_is_valid = {
let pubkey_cache = get_validator_pubkey_cache(chain)?;
let pubkey = pubkey_cache
.get(block.message.proposer_index as usize)
.ok_or(BlockError::UnknownValidator(block.message.proposer_index))?;
.get(block.message().proposer_index() as usize)
.ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?;
block.verify_signature(
Some(block_root),
pubkey,
@ -639,18 +650,18 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
if chain
.observed_block_producers
.write()
.observe_proposer(&block.message)
.observe_proposer(block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
return Err(BlockError::RepeatProposal {
proposer: block.message.proposer_index,
slot: block.message.slot,
proposer: block.message().proposer_index(),
slot: block.slot(),
});
}
if block.message.proposer_index != expected_proposer as u64 {
if block.message().proposer_index() != expected_proposer as u64 {
return Err(BlockError::IncorrectBlockProposer {
block: block.message.proposer_index,
block: block.message().proposer_index(),
local_shuffling: expected_proposer as u64,
});
}
@ -693,10 +704,15 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
block
.fork_name(&chain.spec)
.map_err(BlockError::InconsistentFork)?;
let (mut parent, block) = load_parent(block, chain)?;
// Reject any block that exceeds our limit on skipped slots.
check_block_skip_slots(chain, parent.beacon_block.slot(), &block.message)?;
check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
let state = cheap_state_advance_to_obtain_committees(
&mut parent.pre_state,
@ -860,7 +876,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
}
// Reject any block that exceeds our limit on skipped slots.
check_block_skip_slots(chain, parent.beacon_block.slot(), &block.message)?;
check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?;
/*
* Perform cursory checks to see if the block is even worth processing.
@ -896,20 +912,20 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
// Perform a sanity check on the pre-state.
let parent_slot = parent.beacon_block.slot();
if state.slot < parent_slot || state.slot > parent_slot + 1 {
if state.slot() < parent_slot || state.slot() > parent_slot + 1 {
return Err(BeaconChainError::BadPreState {
parent_root: parent.beacon_block_root,
parent_slot,
block_root,
block_slot: block.slot(),
state_slot: state.slot,
state_slot: state.slot(),
}
.into());
}
let distance = block.slot().as_u64().saturating_sub(state.slot.as_u64());
let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64());
for _ in 0..distance {
let state_root = if parent.beacon_block.slot() == state.slot {
let state_root = if parent.beacon_block.slot() == state.slot() {
// If it happens that `pre_state` has *not* already been advanced forward a single
// slot, then there is no need to compute the state root for this
// `per_slot_processing` call since that state root is already stored in the parent
@ -935,7 +951,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
vec![]
} else {
vec![
if state.slot % T::EthSpec::slots_per_epoch() == 0 {
if state.slot() % T::EthSpec::slots_per_epoch() == 0 {
StoreOp::PutState(state_root, &state)
} else {
StoreOp::PutStateSummary(
@ -1070,14 +1086,14 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
fn check_block_skip_slots<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
parent_slot: Slot,
block: &BeaconBlock<T::EthSpec>,
block: BeaconBlockRef<'_, T::EthSpec>,
) -> Result<(), BlockError<T::EthSpec>> {
// Reject any block that exceeds our limit on skipped slots.
if let Some(max_skip_slots) = chain.config.import_max_skip_slots {
if block.slot > parent_slot + max_skip_slots {
if block.slot() > parent_slot + max_skip_slots {
return Err(BlockError::TooManySkippedSlots {
parent_slot,
block_slot: block.slot,
block_slot: block.slot(),
});
}
}
@ -1090,7 +1106,7 @@ fn check_block_skip_slots<T: BeaconChainTypes>(
/// Returns an error if the block is earlier or equal to the finalized slot, or there was an error
/// verifying that condition.
fn check_block_against_finalized_slot<T: BeaconChainTypes>(
block: &BeaconBlock<T::EthSpec>,
block: BeaconBlockRef<'_, T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<(), BlockError<T::EthSpec>> {
let finalized_slot = chain
@ -1099,9 +1115,9 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
if block.slot <= finalized_slot {
if block.slot() <= finalized_slot {
Err(BlockError::WouldRevertFinalizedSlot {
block_slot: block.slot,
block_slot: block.slot(),
finalized_slot,
})
} else {
@ -1127,7 +1143,7 @@ pub fn check_block_is_finalized_descendant<T: BeaconChainTypes, F: ForkChoiceSto
// 2. The parent is unknown to us, we probably want to download it since it might actually
// descend from the finalized root.
if store
.item_exists::<SignedBeaconBlock<T::EthSpec>>(&block.parent_root())
.block_exists(&block.parent_root())
.map_err(|e| BlockError::BeaconChainError(e.into()))?
{
Err(BlockError::NotFinalizedDescendant {
@ -1151,24 +1167,24 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
block_root: Option<Hash256>,
chain: &BeaconChain<T>,
) -> Result<Hash256, BlockError<T::EthSpec>> {
let block = &signed_block.message;
let block = signed_block.message();
// Do not process blocks from the future.
if block.slot > chain.slot()? {
if block.slot() > chain.slot()? {
return Err(BlockError::FutureSlot {
present_slot: chain.slot()?,
block_slot: block.slot,
block_slot: block.slot(),
});
}
// Do not re-process the genesis block.
if block.slot == 0 {
if block.slot() == 0 {
return Err(BlockError::GenesisBlock);
}
// This is an artificial (non-spec) restriction that provides some protection from overflow
// abuses.
if block.slot >= MAXIMUM_BLOCK_SLOT_NUMBER {
if block.slot() >= MAXIMUM_BLOCK_SLOT_NUMBER {
return Err(BlockError::BlockSlotLimitReached);
}
@ -1209,7 +1225,7 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
if let Some(proto_block) = chain
.fork_choice
.read()
.get_block(&block.message.parent_root)
.get_block(&block.message().parent_root())
{
Ok((proto_block, block))
} else {
@ -1327,10 +1343,10 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
state.build_committee_cache(RelativeEpoch::Current, spec)?;
Ok(Cow::Borrowed(state))
} else if state.slot > block_slot {
} else if state.slot() > block_slot {
Err(BlockError::BlockIsNotLaterThanParent {
block_slot,
parent_slot: state.slot,
parent_slot: state.slot(),
})
} else {
let mut state = state.clone_with(CloneConfig::committee_caches_only());
@ -1372,7 +1388,7 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
move |validator_index| {
// Disallow access to any validator pubkeys that are not in the current beacon
// state.
if validator_index < state.validators.len() {
if validator_index < state.validators().len() {
validator_pubkey_cache
.get(validator_index)
.map(|pk| Cow::Borrowed(pk))
@ -1398,8 +1414,8 @@ fn verify_header_signature<T: BeaconChainTypes>(
let (fork, genesis_validators_root) = chain
.with_head(|head| {
Ok((
head.beacon_state.fork,
head.beacon_state.genesis_validators_root,
head.beacon_state.fork(),
head.beacon_state.genesis_validators_root(),
))
})
.map_err(|e: BlockError<T::EthSpec>| e)?;
@ -1458,7 +1474,7 @@ fn participation_ratio(section: u64, total: u64) -> Option<f64> {
fn write_state<T: EthSpec>(prefix: &str, state: &BeaconState<T>, log: &Logger) {
if WRITE_BLOCK_PROCESSING_SSZ {
let root = state.tree_hash_root();
let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root);
let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot(), root);
let mut path = std::env::temp_dir().join("lighthouse");
let _ = fs::create_dir_all(path.clone());
path = path.join(filename);
@ -1479,7 +1495,7 @@ fn write_state<T: EthSpec>(prefix: &str, state: &BeaconState<T>, log: &Logger) {
fn write_block<T: EthSpec>(block: &SignedBeaconBlock<T>, root: Hash256, log: &Logger) {
if WRITE_BLOCK_PROCESSING_SSZ {
let filename = format!("block_slot_{}_root{}.ssz", block.message.slot, root);
let filename = format!("block_slot_{}_root{}.ssz", block.slot(), root);
let mut path = std::env::temp_dir().join("lighthouse");
let _ = fs::create_dir_all(path.clone());
path = path.join(filename);

View File

@ -236,7 +236,7 @@ where
.ok_or("Fork choice not found in store")?;
let genesis_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&chain.genesis_block_root)
.get_block(&chain.genesis_block_root)
.map_err(|e| format!("DB error when reading genesis block: {:?}", e))?
.ok_or("Genesis block not found in store")?;
let genesis_state = store
@ -244,7 +244,7 @@ where
.map_err(|e| format!("DB error when reading genesis state: {:?}", e))?
.ok_or("Genesis block not found in store")?;
self.genesis_time = Some(genesis_state.genesis_time);
self.genesis_time = Some(genesis_state.genesis_time());
self.op_pool = Some(
store
@ -282,7 +282,7 @@ where
.build_all_caches(&self.spec)
.map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?;
let beacon_state_root = beacon_block.message.state_root;
let beacon_state_root = beacon_block.message().state_root();
let beacon_block_root = beacon_block.canonical_root();
self.genesis_state_root = Some(beacon_state_root);
@ -292,12 +292,12 @@ where
.put_state(&beacon_state_root, &beacon_state)
.map_err(|e| format!("Failed to store genesis state: {:?}", e))?;
store
.put_item(&beacon_block_root, &beacon_block)
.put_block(&beacon_block_root, beacon_block.clone())
.map_err(|e| format!("Failed to store genesis block: {:?}", e))?;
// Store the genesis block under the `ZERO_HASH` key.
store
.put_item(&Hash256::zero(), &beacon_block)
.put_block(&Hash256::zero(), beacon_block.clone())
.map_err(|e| {
format!(
"Failed to store genesis block under 0x00..00 alias: {:?}",
@ -316,13 +316,13 @@ where
let fork_choice = ForkChoice::from_genesis(
fc_store,
genesis.beacon_block_root,
&genesis.beacon_block.message,
&genesis.beacon_block.deconstruct().0,
&genesis.beacon_state,
)
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;
self.fork_choice = Some(fork_choice);
self.genesis_time = Some(genesis.beacon_state.genesis_time);
self.genesis_time = Some(genesis.beacon_state.genesis_time());
Ok(self.empty_op_pool())
}
@ -435,7 +435,7 @@ where
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
let head_block = store
.get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
.get_block(&head_block_root)
.map_err(|e| format!("DB error when reading head block: {:?}", e))?
.ok_or("Head block not found in store")?;
let head_state_root = head_block.state_root();
@ -460,7 +460,7 @@ where
//
// This is a sanity check to detect database corruption.
let fc_finalized = fork_choice.finalized_checkpoint();
let head_finalized = canonical_head.beacon_state.finalized_checkpoint;
let head_finalized = canonical_head.beacon_state.finalized_checkpoint();
if fc_finalized != head_finalized {
if head_finalized.root == Hash256::zero()
&& head_finalized.epoch == fc_finalized.epoch
@ -518,7 +518,7 @@ where
observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(),
eth1_chain: self.eth1_chain,
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(),
canonical_head: TimeoutRwLock::new(canonical_head.clone()),
genesis_block_root,
genesis_state_root,
@ -558,7 +558,7 @@ where
"Weak subjectivity checkpoint verification failed on startup!";
"head_block_root" => format!("{}", head.beacon_block_root),
"head_slot" => format!("{}", head.beacon_block.slot()),
"finalized_epoch" => format!("{}", head.beacon_state.finalized_checkpoint.epoch),
"finalized_epoch" => format!("{}", head.beacon_state.finalized_checkpoint().epoch),
"wss_checkpoint_epoch" => format!("{}", wss_checkpoint.epoch),
"error" => format!("{:?}", e),
);
@ -640,16 +640,17 @@ fn genesis_block<T: EthSpec>(
genesis_state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<SignedBeaconBlock<T>, String> {
let mut genesis_block = SignedBeaconBlock {
message: BeaconBlock::empty(&spec),
// Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis
// block consistent with every other block.
signature: Signature::empty(),
};
genesis_block.message.state_root = genesis_state
let mut genesis_block = BeaconBlock::empty(&spec);
*genesis_block.state_root_mut() = genesis_state
.update_tree_hash_cache()
.map_err(|e| format!("Error hashing genesis state: {:?}", e))?;
Ok(genesis_block)
Ok(SignedBeaconBlock::from_block(
genesis_block,
// Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis
// block consistent with every other block.
Signature::empty(),
))
}
#[cfg(not(debug_assertions))]
@ -714,9 +715,10 @@ mod test {
let state = head.beacon_state;
let block = head.beacon_block;
assert_eq!(state.slot, Slot::new(0), "should start from genesis");
assert_eq!(state.slot(), Slot::new(0), "should start from genesis");
assert_eq!(
state.genesis_time, 13_371_337,
state.genesis_time(),
13_371_337,
"should have the correct genesis time"
);
assert_eq!(
@ -734,7 +736,7 @@ mod test {
"should store genesis block under zero hash alias"
);
assert_eq!(
state.validators.len(),
state.validators().len(),
validator_count,
"should have correct validator count"
);
@ -757,24 +759,25 @@ mod test {
.expect("should build state");
assert_eq!(
state.eth1_data.block_hash,
state.eth1_data().block_hash,
Hash256::from_slice(&[0x42; 32]),
"eth1 block hash should be co-ordinated junk"
);
assert_eq!(
state.genesis_time, genesis_time,
state.genesis_time(),
genesis_time,
"genesis time should be as specified"
);
for b in &state.balances {
for b in state.balances() {
assert_eq!(
*b, spec.max_effective_balance,
"validator balances should be max effective balance"
);
}
for v in &state.validators {
for v in state.validators() {
let creds = v.withdrawal_credentials.as_bytes();
assert_eq!(
creds[0], spec.bls_withdrawal_prefix_byte,
@ -788,13 +791,13 @@ mod test {
}
assert_eq!(
state.balances.len(),
state.balances().len(),
validator_count,
"validator balances len should be correct"
);
assert_eq!(
state.validators.len(),
state.validators().len(),
validator_count,
"validator count should be correct"
);

View File

@ -2,6 +2,7 @@ use crate::metrics;
use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
use eth2::lighthouse::Eth1SyncStatusData;
use eth2_hashing::hash;
use int_to_bytes::int_to_bytes32;
use slog::{debug, error, trace, Logger};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
@ -364,7 +365,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
Ok(Eth1Data {
deposit_root: Hash256::from_slice(&deposit_root),
deposit_count: state.eth1_deposit_index,
deposit_count: state.eth1_deposit_index(),
block_hash: Hash256::from_slice(&block_hash),
})
}
@ -451,9 +452,9 @@ impl<T: EthSpec> CachingEth1Backend<T> {
impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
let period = T::SlotsPerEth1VotingPeriod::to_u64();
let voting_period_start_slot = (state.slot / period) * period;
let voting_period_start_slot = (state.slot() / period) * period;
let voting_period_start_seconds = slot_start_seconds::<T>(
state.genesis_time,
state.genesis_time(),
spec.seconds_per_slot,
voting_period_start_slot,
);
@ -491,13 +492,13 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
vote
})
.unwrap_or_else(|| {
let vote = state.eth1_data.clone();
let vote = state.eth1_data().clone();
error!(
self.log,
"No valid eth1_data votes, `votes_to_consider` empty";
"lowest_block_number" => self.core.lowest_block_number(),
"earliest_block_timestamp" => self.core.earliest_block_timestamp(),
"genesis_time" => state.genesis_time,
"genesis_time" => state.genesis_time(),
"outcome" => "casting `state.eth1_data` as eth1 vote"
);
metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES);
@ -522,11 +523,11 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
eth1_data_vote: &Eth1Data,
_spec: &ChainSpec,
) -> Result<Vec<Deposit>, Error> {
let deposit_index = state.eth1_deposit_index;
let deposit_index = state.eth1_deposit_index();
let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? {
new_eth1_data.deposit_count
} else {
state.eth1_data.deposit_count
state.eth1_data().deposit_count
};
match deposit_index.cmp(&deposit_count) {
@ -609,7 +610,7 @@ fn collect_valid_votes<T: EthSpec>(
) -> Eth1DataVoteCount {
let mut valid_votes = HashMap::new();
state
.eth1_data_votes
.eth1_data_votes()
.iter()
.filter_map(|vote| {
votes_to_consider
@ -633,13 +634,6 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option<Eth1Data> {
.map(|((eth1_data, _), _)| eth1_data.clone())
}
/// Returns `int` as little-endian bytes with a length of 32.
fn int_to_bytes32(int: u64) -> Vec<u8> {
let mut vec = int.to_le_bytes().to_vec();
vec.resize(32, 0);
vec
}
/// Returns the unix-epoch seconds at the start of the given `slot`.
fn slot_start_seconds<T: EthSpec>(
genesis_unix_seconds: u64,
@ -666,7 +660,7 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) ->
mod test {
use super::*;
use environment::null_logger;
use types::{test_utils::DepositTestTask, MinimalEthSpec};
use types::{DepositData, MinimalEthSpec, Signature};
type E = MinimalEthSpec;
@ -680,9 +674,9 @@ mod test {
fn get_voting_period_start_seconds(state: &BeaconState<E>, spec: &ChainSpec) -> u64 {
let period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
let voting_period_start_slot = (state.slot / period) * period;
let voting_period_start_slot = (state.slot() / period) * period;
slot_start_seconds::<E>(
state.genesis_time,
state.genesis_time(),
spec.seconds_per_slot,
voting_period_start_slot,
)
@ -723,10 +717,7 @@ mod test {
mod eth1_chain_json_backend {
use super::*;
use eth1::DepositLog;
use types::{
test_utils::{generate_deterministic_keypair, TestingDepositBuilder},
EthSpec, MainnetEthSpec,
};
use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec};
fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
let eth1_config = Eth1Config {
@ -743,13 +734,17 @@ mod test {
fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {
let keypair = generate_deterministic_keypair(i as usize);
let mut builder =
TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance);
builder.sign(DepositTestTask::Valid, &keypair, spec);
let deposit_data = builder.build().data;
let mut deposit = DepositData {
pubkey: keypair.pk.into(),
withdrawal_credentials: Hash256::zero(),
amount: spec.max_effective_balance,
signature: Signature::empty().into(),
};
deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec());
DepositLog {
deposit_data,
deposit_data: deposit,
block_number: i,
index: i,
signature_is_valid: true,
@ -768,8 +763,8 @@ mod test {
);
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
state.eth1_deposit_index = 0;
state.eth1_data.deposit_count = 0;
*state.eth1_deposit_index_mut() = 0;
state.eth1_data_mut().deposit_count = 0;
assert!(
eth1_chain
@ -778,7 +773,7 @@ mod test {
"should succeed if cache is empty but no deposits are required"
);
state.eth1_data.deposit_count = 1;
state.eth1_data_mut().deposit_count = 1;
assert!(
eth1_chain
@ -821,8 +816,8 @@ mod test {
);
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
state.eth1_deposit_index = 0;
state.eth1_data.deposit_count = 0;
*state.eth1_deposit_index_mut() = 0;
state.eth1_data_mut().deposit_count = 0;
assert!(
eth1_chain
@ -832,10 +827,10 @@ mod test {
);
(0..3).for_each(|initial_deposit_index| {
state.eth1_deposit_index = initial_deposit_index as u64;
*state.eth1_deposit_index_mut() = initial_deposit_index as u64;
(initial_deposit_index..deposits.len()).for_each(|i| {
state.eth1_data.deposit_count = i as u64;
state.eth1_data_mut().deposit_count = i as u64;
let deposits_for_inclusion = eth1_chain
.deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
@ -888,7 +883,8 @@ mod test {
.eth1_data_for_block_production(&state, &spec)
.expect("should produce default eth1 data vote");
assert_eq!(
a, state.eth1_data,
a,
*state.eth1_data(),
"default vote should be same as state.eth1_data"
);
}
@ -908,7 +904,7 @@ mod test {
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
state.slot = Slot::from(slots_per_eth1_voting_period * 10);
*state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10);
let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
let voting_period_start = get_voting_period_start_seconds(&state, &spec);
let start_eth1_block = voting_period_start - follow_distance_seconds * 2;
@ -974,8 +970,8 @@ mod test {
let eth1_follow_distance = spec.eth1_follow_distance;
let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
state.genesis_time = 0;
state.slot = Slot::from(slots_per_eth1_voting_period * 10);
*state.genesis_time_mut() = 0;
*state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10);
let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
let voting_period_start = get_voting_period_start_seconds(&state, &spec);
@ -1055,7 +1051,7 @@ mod test {
let votes_to_consider = get_eth1_data_vec(slots, 0);
state.eth1_data_votes = votes_to_consider[0..slots as usize / 4]
*state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4]
.iter()
.map(|(eth1_data, _)| eth1_data)
.cloned()
@ -1084,7 +1080,7 @@ mod test {
.expect("should have some eth1 data")
.clone();
state.eth1_data_votes = vec![duplicate_eth1_data.clone(); 4]
*state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4]
.iter()
.map(|(eth1_data, _)| eth1_data)
.cloned()

View File

@ -112,14 +112,14 @@ mod test {
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
let block_root = Hash256::from_low_u64_be(i);
block.slot = Slot::new(i);
block.parent_root = if i == 0 {
*block.slot_mut() = Slot::new(i);
*block.parent_root_mut() = if i == 0 {
Hash256::random()
} else {
Hash256::from_low_u64_be(i - 1)
};
head_tracker.register_block(block_root, block.parent_root, block.slot);
head_tracker.register_block(block_root, block.parent_root(), block.slot());
}
assert_eq!(
@ -130,9 +130,9 @@ mod test {
let mut block: BeaconBlock<E> = BeaconBlock::empty(spec);
let block_root = Hash256::from_low_u64_be(42);
block.slot = Slot::new(15);
block.parent_root = Hash256::from_low_u64_be(14);
head_tracker.register_block(block_root, block.parent_root, block.slot);
*block.slot_mut() = Slot::new(15);
*block.parent_root_mut() = Hash256::from_low_u64_be(14);
head_tracker.register_block(block_root, block.parent_root(), block.slot());
let heads = head_tracker.heads();

View File

@ -682,42 +682,53 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`.
fn scrape_head_state<T: EthSpec>(state: &BeaconState<T>, state_root: Hash256) {
set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot);
set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot());
set_gauge_by_hash(&HEAD_STATE_ROOT, state_root);
set_gauge_by_slot(
&HEAD_STATE_LATEST_BLOCK_SLOT,
state.latest_block_header.slot,
state.latest_block_header().slot,
);
set_gauge_by_hash(
&HEAD_STATE_CURRENT_JUSTIFIED_ROOT,
state.current_justified_checkpoint.root,
state.current_justified_checkpoint().root,
);
set_gauge_by_epoch(
&HEAD_STATE_CURRENT_JUSTIFIED_EPOCH,
state.current_justified_checkpoint.epoch,
state.current_justified_checkpoint().epoch,
);
set_gauge_by_hash(
&HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT,
state.previous_justified_checkpoint.root,
state.previous_justified_checkpoint().root,
);
set_gauge_by_epoch(
&HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH,
state.previous_justified_checkpoint.epoch,
state.previous_justified_checkpoint().epoch,
);
set_gauge_by_hash(
&HEAD_STATE_FINALIZED_ROOT,
state.finalized_checkpoint().root,
);
set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root);
set_gauge_by_epoch(
&HEAD_STATE_FINALIZED_EPOCH,
state.finalized_checkpoint.epoch,
state.finalized_checkpoint().epoch,
);
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len());
set_gauge_by_u64(
&HEAD_STATE_VALIDATOR_BALANCES,
state.balances().iter().sum(),
);
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index());
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len());
set_gauge_by_u64(
&HEAD_STATE_VALIDATOR_BALANCES,
state.balances().iter().sum(),
);
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index);
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len());
set_gauge_by_u64(&HEAD_STATE_VALIDATOR_BALANCES, state.balances.iter().sum());
let mut num_active: usize = 0;
let mut num_slashed: usize = 0;
let mut num_withdrawn: usize = 0;
for v in &state.validators {
for v in state.validators() {
if v.is_active_at(state.current_epoch()) {
num_active += 1;
}

View File

@ -284,9 +284,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
// The finalized state must be for the epoch boundary slot, not the slot of the finalized
// block.
if new_finalized_state.slot != new_finalized_slot {
if new_finalized_state.slot() != new_finalized_slot {
return Err(PruningError::IncorrectFinalizedState {
state_slot: new_finalized_state.slot,
state_slot: new_finalized_state.slot(),
new_finalized_slot,
}
.into());

View File

@ -3,7 +3,7 @@
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use types::{BeaconBlock, EthSpec, Slot, Unsigned};
use types::{BeaconBlockRef, EthSpec, Slot, Unsigned};
#[derive(Debug, PartialEq)]
pub enum Error {
@ -52,14 +52,14 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn observe_proposer(&mut self, block: &BeaconBlock<E>) -> Result<bool, Error> {
pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
self.sanitize_block(block)?;
let did_not_exist = self
.items
.entry(block.slot)
.entry(block.slot())
.or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize()))
.insert(block.proposer_index);
.insert(block.proposer_index());
Ok(!did_not_exist)
}
@ -72,27 +72,27 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
pub fn proposer_has_been_observed(&self, block: &BeaconBlock<E>) -> Result<bool, Error> {
pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
self.sanitize_block(block)?;
let exists = self
.items
.get(&block.slot)
.map_or(false, |set| set.contains(&block.proposer_index));
.get(&block.slot())
.map_or(false, |set| set.contains(&block.proposer_index()));
Ok(exists)
}
/// Returns `Ok(())` if the given `block` is sane.
fn sanitize_block(&self, block: &BeaconBlock<E>) -> Result<(), Error> {
if block.proposer_index > E::ValidatorRegistryLimit::to_u64() {
return Err(Error::ValidatorIndexTooHigh(block.proposer_index));
fn sanitize_block(&self, block: BeaconBlockRef<'_, E>) -> Result<(), Error> {
if block.proposer_index() >= E::ValidatorRegistryLimit::to_u64() {
return Err(Error::ValidatorIndexTooHigh(block.proposer_index()));
}
let finalized_slot = self.finalized_slot;
if finalized_slot > 0 && block.slot <= finalized_slot {
if finalized_slot > 0 && block.slot() <= finalized_slot {
return Err(Error::FinalizedBlock {
slot: block.slot,
slot: block.slot(),
finalized_slot,
});
}
@ -119,14 +119,14 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
#[cfg(test)]
mod tests {
use super::*;
use types::MainnetEthSpec;
use types::{BeaconBlock, MainnetEthSpec};
type E = MainnetEthSpec;
fn get_block(slot: u64, proposer: u64) -> BeaconBlock<E> {
let mut block = BeaconBlock::empty(&E::default_spec());
block.slot = slot.into();
block.proposer_index = proposer;
*block.slot_mut() = slot.into();
*block.proposer_index_mut() = proposer;
block
}
@ -138,10 +138,10 @@ mod tests {
assert_eq!(cache.items.len(), 0, "no slots should be present");
// Slot 0, proposer 0
let block_a = &get_block(0, 0);
let block_a = get_block(0, 0);
assert_eq!(
cache.observe_proposer(block_a),
cache.observe_proposer(block_a.to_ref()),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
@ -197,10 +197,10 @@ mod tests {
*/
// First slot of finalized epoch, proposer 0
let block_b = &get_block(E::slots_per_epoch(), 0);
let block_b = get_block(E::slots_per_epoch(), 0);
assert_eq!(
cache.observe_proposer(block_b),
cache.observe_proposer(block_b.to_ref()),
Err(Error::FinalizedBlock {
slot: E::slots_per_epoch().into(),
finalized_slot: E::slots_per_epoch().into(),
@ -217,10 +217,10 @@ mod tests {
let three_epochs = E::slots_per_epoch() * 3;
// First slot of finalized epoch, proposer 0
let block_b = &get_block(three_epochs, 0);
let block_b = get_block(three_epochs, 0);
assert_eq!(
cache.observe_proposer(block_b),
cache.observe_proposer(block_b.to_ref()),
Ok(false),
"can insert non-finalized block"
);
@ -266,25 +266,25 @@ mod tests {
let mut cache = ObservedBlockProducers::default();
// Slot 0, proposer 0
let block_a = &get_block(0, 0);
let block_a = get_block(0, 0);
assert_eq!(
cache.proposer_has_been_observed(block_a),
cache.proposer_has_been_observed(block_a.to_ref()),
Ok(false),
"no observation in empty cache"
);
assert_eq!(
cache.observe_proposer(block_a),
cache.observe_proposer(block_a.to_ref()),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
assert_eq!(
cache.proposer_has_been_observed(block_a),
cache.proposer_has_been_observed(block_a.to_ref()),
Ok(true),
"observed block is indicated as true"
);
assert_eq!(
cache.observe_proposer(block_a),
cache.observe_proposer(block_a.to_ref()),
Ok(true),
"observing again indicates true"
);
@ -302,25 +302,25 @@ mod tests {
);
// Slot 1, proposer 0
let block_b = &get_block(1, 0);
let block_b = get_block(1, 0);
assert_eq!(
cache.proposer_has_been_observed(block_b),
cache.proposer_has_been_observed(block_b.to_ref()),
Ok(false),
"no observation for new slot"
);
assert_eq!(
cache.observe_proposer(block_b),
cache.observe_proposer(block_b.to_ref()),
Ok(false),
"can observe proposer for new slot, indicates proposer unobserved"
);
assert_eq!(
cache.proposer_has_been_observed(block_b),
cache.proposer_has_been_observed(block_b.to_ref()),
Ok(true),
"observed block in slot 1 is indicated as true"
);
assert_eq!(
cache.observe_proposer(block_b),
cache.observe_proposer(block_b.to_ref()),
Ok(true),
"observing slot 1 again indicates true"
);
@ -347,25 +347,25 @@ mod tests {
);
// Slot 0, proposer 1
let block_c = &get_block(0, 1);
let block_c = get_block(0, 1);
assert_eq!(
cache.proposer_has_been_observed(block_c),
cache.proposer_has_been_observed(block_c.to_ref()),
Ok(false),
"no observation for new proposer"
);
assert_eq!(
cache.observe_proposer(block_c),
cache.observe_proposer(block_c.to_ref()),
Ok(false),
"can observe new proposer, indicates proposer unobserved"
);
assert_eq!(
cache.proposer_has_been_observed(block_c),
cache.proposer_has_been_observed(block_c.to_ref()),
Ok(true),
"observed new proposer block is indicated as true"
);
assert_eq!(
cache.observe_proposer(block_c),
cache.observe_proposer(block_c.to_ref()),
Ok(true),
"observing new proposer again indicates true"
);

View File

@ -161,7 +161,7 @@ impl<T: EthSpec> SnapshotCache<T> {
.enumerate()
.filter_map(|(i, snapshot)| {
if snapshot.beacon_block_root != self.head_block_root {
Some((i, snapshot.beacon_state.slot))
Some((i, snapshot.beacon_state.slot()))
} else {
None
}
@ -263,7 +263,7 @@ impl<T: EthSpec> SnapshotCache<T> {
/// Removes all snapshots from the queue that are less than or equal to the finalized epoch.
pub fn prune(&mut self, finalized_epoch: Epoch) {
self.snapshots.retain(|snapshot| {
snapshot.beacon_state.slot > finalized_epoch.start_slot(T::slots_per_epoch())
snapshot.beacon_state.slot() > finalized_epoch.start_slot(T::slots_per_epoch())
})
}
@ -279,27 +279,43 @@ impl<T: EthSpec> SnapshotCache<T> {
#[cfg(test)]
mod test {
use super::*;
use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use store::StoreConfig;
use types::{
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
BeaconBlock, Epoch, MainnetEthSpec, SignedBeaconBlock, Slot,
test_utils::generate_deterministic_keypair, BeaconBlock, Epoch, MainnetEthSpec,
SignedBeaconBlock, Slot,
};
fn get_harness() -> BeaconChainHarness<EphemeralHarnessType<MainnetEthSpec>> {
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
None,
types::test_utils::generate_deterministic_keypairs(1),
StoreConfig::default(),
);
harness.advance_slot();
harness
}
const CACHE_SIZE: usize = 4;
fn get_snapshot(i: u64) -> BeaconSnapshot<MainnetEthSpec> {
let spec = MainnetEthSpec::default_spec();
let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(1, &spec);
let (beacon_state, _keypairs) = state_builder.build();
let beacon_state = get_harness().chain.head_beacon_state().unwrap();
let signed_beacon_block = SignedBeaconBlock::from_block(
BeaconBlock::empty(&spec),
generate_deterministic_keypair(0)
.sk
.sign(Hash256::from_low_u64_be(42)),
);
BeaconSnapshot {
beacon_state,
beacon_block: SignedBeaconBlock {
message: BeaconBlock::empty(&spec),
signature: generate_deterministic_keypair(0)
.sk
.sign(Hash256::from_low_u64_be(42)),
},
beacon_block: signed_beacon_block,
beacon_block_root: Hash256::from_low_u64_be(i),
}
}
@ -319,7 +335,8 @@ mod test {
let mut snapshot = get_snapshot(i);
// Each snapshot should be one slot into an epoch, with each snapshot one epoch apart.
snapshot.beacon_state.slot = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1);
*snapshot.beacon_state.slot_mut() =
Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1);
cache.insert(snapshot, None);
@ -352,20 +369,20 @@ mod test {
.get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none())
.is_none());
assert!(
assert_eq!(
cache
.get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none())
.expect("the head should still be in the cache")
.beacon_block_root
== Hash256::from_low_u64_be(0),
.beacon_block_root,
Hash256::from_low_u64_be(0),
"get_cloned should get the correct snapshot"
);
assert!(
assert_eq!(
cache
.get_state_for_block_processing(Hash256::from_low_u64_be(0))
.expect("the head should still be in the cache")
.beacon_block_root
== Hash256::from_low_u64_be(0),
.beacon_block_root,
Hash256::from_low_u64_be(0),
"get_state_for_block_processing should get the correct snapshot"
);
@ -392,12 +409,12 @@ mod test {
}
// Ensure that the new head value was not removed from the cache.
assert!(
assert_eq!(
cache
.get_state_for_block_processing(Hash256::from_low_u64_be(2))
.expect("the new head should still be in the cache")
.beacon_block_root
== Hash256::from_low_u64_be(2),
.beacon_block_root,
Hash256::from_low_u64_be(2),
"get_state_for_block_processing should get the correct snapshot"
);
}

View File

@ -213,10 +213,10 @@ fn advance_head<T: BeaconChainTypes>(
} => (block_slot, state_root, *state),
};
let initial_slot = state.slot;
let initial_slot = state.slot();
let initial_epoch = state.current_epoch();
let state_root = if state.slot == head_slot {
let state_root = if state.slot() == head_slot {
Some(head_state_root)
} else {
// Protect against advancing a state more than a single slot.
@ -225,7 +225,7 @@ fn advance_head<T: BeaconChainTypes>(
// database. Future works might store temporary, intermediate states inside this function.
return Err(Error::BadStateSlot {
block_slot: head_slot,
state_slot: state.slot,
state_slot: state.slot(),
});
};
@ -249,7 +249,7 @@ fn advance_head<T: BeaconChainTypes>(
log,
"Advanced head state one slot";
"head_root" => ?head_root,
"state_slot" => state.slot,
"state_slot" => state.slot(),
"current_slot" => current_slot,
);
@ -278,7 +278,7 @@ fn advance_head<T: BeaconChainTypes>(
state
.get_beacon_proposer_indices(&beacon_chain.spec)
.map_err(BeaconChainError::from)?,
state.fork,
state.fork(),
)
.map_err(BeaconChainError::from)?;
@ -304,7 +304,7 @@ fn advance_head<T: BeaconChainTypes>(
);
}
let final_slot = state.slot;
let final_slot = state.slot();
// Insert the advanced state back into the snapshot cache.
beacon_chain

View File

@ -10,8 +10,11 @@ use crate::{
BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler,
StateSkipConfig,
};
use bls::get_withdrawal_credentials;
use futures::channel::mpsc::Receiver;
use genesis::interop_genesis_state;
use int_to_bytes::int_to_bytes32;
use merkle_proof::MerkleTree;
use parking_lot::Mutex;
use rand::rngs::StdRng;
use rand::Rng;
@ -29,11 +32,12 @@ use task_executor::ShutdownReason;
use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash;
use types::{
AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState,
BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Graffiti, Hash256,
IndexedAttestation, Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof,
SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId,
VariableList, VoluntaryExit,
typenum::U4294967296, AggregateSignature, Attestation, AttestationData, AttesterSlashing,
BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, DepositData, Domain,
Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, ProposerSlashing,
PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, SignedBeaconBlock,
SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList,
VoluntaryExit,
};
pub use types::test_utils::generate_deterministic_keypairs;
@ -42,6 +46,8 @@ pub use types::test_utils::generate_deterministic_keypairs;
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
// This parameter is required by a builder but not used because we use the `TestingSlotClock`.
pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1);
// Environment variable to read if `fork_from_env` feature is enabled.
const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
pub type BaseHarnessType<TEthSpec, THotStore, TColdStore> =
Witness<TestingSlotClock, CachingEth1Backend<TEthSpec>, TEthSpec, THotStore, TColdStore>;
@ -106,6 +112,29 @@ pub fn test_logger() -> Logger {
}
}
/// Return a `ChainSpec` suitable for test usage.
///
/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment
/// variable. Otherwise use the default spec.
pub fn test_spec<E: EthSpec>() -> ChainSpec {
if cfg!(feature = "fork_from_env") {
let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| {
panic!(
"{} env var must be defined when using fork_from_env: {:?}",
FORK_NAME_ENV_VAR, e
)
});
let fork = match fork_name.as_str() {
"base" => ForkName::Base,
"altair" => ForkName::Altair,
other => panic!("unknown FORK_NAME: {}", other),
};
fork.make_genesis_spec(E::default_spec())
} else {
E::default_spec()
}
}
/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
@ -121,15 +150,20 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
pub rng: Mutex<StdRng>,
}
type HarnessAttestations<E> = Vec<(
pub type HarnessAttestations<E> = Vec<(
Vec<(Attestation<E>, SubnetId)>,
Option<SignedAggregateAndProof<E>>,
)>;
impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> {
pub fn new(eth_spec_instance: E, validator_keypairs: Vec<Keypair>) -> Self {
pub fn new(
eth_spec_instance: E,
spec: Option<ChainSpec>,
validator_keypairs: Vec<Keypair>,
) -> Self {
Self::new_with_store_config(
eth_spec_instance,
spec,
validator_keypairs,
StoreConfig::default(),
)
@ -137,6 +171,7 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> {
pub fn new_with_store_config(
eth_spec_instance: E,
spec: Option<ChainSpec>,
validator_keypairs: Vec<Keypair>,
config: StoreConfig,
) -> Self {
@ -144,18 +179,26 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> {
// committee are required to produce an aggregate. This is overkill, however with small
// validator counts it's the only way to be certain there is _at least one_ aggregator per
// committee.
Self::new_with_target_aggregators(eth_spec_instance, validator_keypairs, 1 << 32, config)
Self::new_with_target_aggregators(
eth_spec_instance,
spec,
validator_keypairs,
1 << 32,
config,
)
}
/// Instantiate a new harness with a custom `target_aggregators_per_committee` spec value
pub fn new_with_target_aggregators(
eth_spec_instance: E,
spec: Option<ChainSpec>,
validator_keypairs: Vec<Keypair>,
target_aggregators_per_committee: u64,
store_config: StoreConfig,
) -> Self {
Self::new_with_chain_config(
eth_spec_instance,
spec,
validator_keypairs,
target_aggregators_per_committee,
store_config,
@ -167,13 +210,14 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> {
/// `target_aggregators_per_committee` spec value, and a `ChainConfig`
pub fn new_with_chain_config(
eth_spec_instance: E,
spec: Option<ChainSpec>,
validator_keypairs: Vec<Keypair>,
target_aggregators_per_committee: u64,
store_config: StoreConfig,
chain_config: ChainConfig,
) -> Self {
let data_dir = tempdir().expect("should create temporary data_dir");
let mut spec = E::default_spec();
let mut spec = spec.unwrap_or_else(test_spec::<E>);
spec.target_aggregators_per_committee = target_aggregators_per_committee;
@ -221,11 +265,12 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
/// Instantiate a new harness with `validator_count` initial validators.
pub fn new_with_disk_store(
eth_spec_instance: E,
spec: Option<ChainSpec>,
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
validator_keypairs: Vec<Keypair>,
) -> Self {
let data_dir = tempdir().expect("should create temporary data_dir");
let spec = E::default_spec();
let spec = spec.unwrap_or_else(test_spec::<E>);
let log = test_logger();
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
@ -265,11 +310,12 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
/// Instantiate a new harness with `validator_count` initial validators.
pub fn resume_from_disk_store(
eth_spec_instance: E,
spec: Option<ChainSpec>,
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
validator_keypairs: Vec<Keypair>,
data_dir: TempDir,
) -> Self {
let spec = E::default_spec();
let spec = spec.unwrap_or_else(test_spec::<E>);
let log = test_logger();
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
@ -379,7 +425,7 @@ where
slot: Slot,
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot);
assert!(slot >= state.slot());
complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot");
@ -400,8 +446,8 @@ where
let domain = self.spec.get_domain(
epoch,
Domain::Randao,
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
);
let message = epoch.signing_root(domain);
let sk = &self.validator_keypairs[proposer_index].sk;
@ -415,14 +461,68 @@ where
let signed_block = block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
(signed_block, state)
}
/// Useful for the `per_block_processing` tests. Creates a block, and returns the state after
/// caches are built but before the generated block is processed.
pub fn make_block_return_pre_state(
&self,
mut state: BeaconState<E>,
slot: Slot,
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot");
state
.build_all_caches(&self.spec)
.expect("should build caches");
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
// If we produce two blocks for the same slot, they hash up to the same value and
// BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
// different blocks each time.
let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>());
let randao_reveal = {
let epoch = slot.epoch(E::slots_per_epoch());
let domain = self.spec.get_domain(
epoch,
Domain::Randao,
&state.fork(),
state.genesis_validators_root(),
);
let message = epoch.signing_root(domain);
let sk = &self.validator_keypairs[proposer_index].sk;
sk.sign(message)
};
let pre_state = state.clone();
let (block, state) = self
.chain
.produce_block_on_state(state, None, slot, randao_reveal, Some(graffiti))
.unwrap();
let signed_block = block.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
(signed_block, pre_state)
}
/// A list of attestations for each committee for the given slot.
///
/// The first layer of the Vec is organised per committee. For example, if the return value is
@ -436,7 +536,7 @@ where
head_block_root: SignedBeaconBlockHash,
attestation_slot: Slot,
) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
let committee_count = state.get_committee_count_at_slot(state.slot).unwrap();
let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
state
.get_beacon_committees_at_slot(attestation_slot)
@ -467,8 +567,8 @@ where
let domain = self.spec.get_domain(
attestation.data.target.epoch,
Domain::BeaconAttester,
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
);
let message = attestation.data.signing_root(domain);
@ -540,65 +640,71 @@ where
slot,
);
let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> = unaggregated_attestations
.iter()
.map(|committee_attestations| {
// If there are any attestations in this committee, create an aggregate.
if let Some((attestation, _)) = committee_attestations.first() {
let bc = state.get_beacon_committee(attestation.data.slot, attestation.data.index)
.unwrap();
let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> =
unaggregated_attestations
.iter()
.map(|committee_attestations| {
// If there are any attestations in this committee, create an aggregate.
if let Some((attestation, _)) = committee_attestations.first() {
let bc = state
.get_beacon_committee(attestation.data.slot, attestation.data.index)
.unwrap();
let aggregator_index = bc.committee
.iter()
.find(|&validator_index| {
if !attesting_validators.contains(validator_index) {
return false
}
// Find an aggregator if one exists. Return `None` if there are no
// aggregators.
let aggregator_index = bc
.committee
.iter()
.find(|&validator_index| {
if !attesting_validators.contains(validator_index) {
return false;
}
let selection_proof = SelectionProof::new::<E>(
state.slot,
&self.validator_keypairs[*validator_index].sk,
&state.fork,
state.genesis_validators_root,
&self.spec,
);
let selection_proof = SelectionProof::new::<E>(
state.slot(),
&self.validator_keypairs[*validator_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
selection_proof.is_aggregator(bc.committee.len(), &self.spec).unwrap_or(false)
})
.copied()
.unwrap_or_else(|| panic!(
"Committee {} at slot {} with {} attesting validators does not have any aggregators",
bc.index, state.slot, bc.committee.len()
));
// If the chain is able to produce an aggregate, use that. Otherwise, build an
// aggregate locally.
let aggregate = self
.chain
.get_aggregated_attestation(&attestation.data)
.unwrap_or_else(|| {
committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| {
agg.aggregate(att);
agg
selection_proof
.is_aggregator(bc.committee.len(), &self.spec)
.unwrap_or(false)
})
});
.copied()?;
let signed_aggregate = SignedAggregateAndProof::from_aggregate(
aggregator_index as u64,
aggregate,
None,
&self.validator_keypairs[aggregator_index].sk,
&state.fork,
state.genesis_validators_root,
&self.spec,
);
// If the chain is able to produce an aggregate, use that. Otherwise, build an
// aggregate locally.
let aggregate = self
.chain
.get_aggregated_attestation(&attestation.data)
.unwrap_or_else(|| {
committee_attestations.iter().skip(1).fold(
attestation.clone(),
|mut agg, (att, _)| {
agg.aggregate(att);
agg
},
)
});
Some(signed_aggregate)
}
else {
None
}
}).collect();
let signed_aggregate = SignedAggregateAndProof::from_aggregate(
aggregator_index as u64,
aggregate,
None,
&self.validator_keypairs[aggregator_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
Some(signed_aggregate)
} else {
None
}
})
.collect();
unaggregated_attestations
.into_iter()
@ -653,12 +759,70 @@ where
}
}
pub fn make_attester_slashing_different_indices(
&self,
validator_indices_1: Vec<u64>,
validator_indices_2: Vec<u64>,
) -> AttesterSlashing<E> {
let data = AttestationData {
slot: Slot::new(0),
index: 0,
beacon_block_root: Hash256::zero(),
target: Checkpoint {
root: Hash256::zero(),
epoch: Epoch::new(0),
},
source: Checkpoint {
root: Hash256::zero(),
epoch: Epoch::new(0),
},
};
let mut attestation_1 = IndexedAttestation {
attesting_indices: VariableList::new(validator_indices_1).unwrap(),
data: data.clone(),
signature: AggregateSignature::infinity(),
};
let mut attestation_2 = IndexedAttestation {
attesting_indices: VariableList::new(validator_indices_2).unwrap(),
data,
signature: AggregateSignature::infinity(),
};
attestation_2.data.index += 1;
for attestation in &mut [&mut attestation_1, &mut attestation_2] {
for &i in &attestation.attesting_indices {
let sk = &self.validator_keypairs[i as usize].sk;
let fork = self.chain.head_info().unwrap().fork;
let genesis_validators_root = self.chain.genesis_validators_root;
let domain = self.chain.spec.get_domain(
attestation.data.target.epoch,
Domain::BeaconAttester,
&fork,
genesis_validators_root,
);
let message = attestation.data.signing_root(domain);
attestation.signature.add_assign(&sk.sign(message));
}
}
AttesterSlashing {
attestation_1,
attestation_2,
}
}
pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing {
let mut block_header_1 = self
.chain
.head_beacon_block()
.unwrap()
.message
.message()
.block_header();
block_header_1.proposer_index = validator_index;
@ -694,6 +858,116 @@ where
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
}
pub fn add_voluntary_exit(
&self,
block: &mut BeaconBlock<E>,
validator_index: u64,
epoch: Epoch,
) {
let exit = self.make_voluntary_exit(validator_index, epoch);
block.body_mut().voluntary_exits_mut().push(exit).unwrap();
}
/// Create a new block, apply `block_modifier` to it, sign it and return it.
///
/// The state returned is a pre-block state at the same slot as the produced block.
pub fn make_block_with_modifier(
&self,
state: BeaconState<E>,
slot: Slot,
block_modifier: impl FnOnce(&mut BeaconBlock<E>),
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
let (block, state) = self.make_block_return_pre_state(state, slot);
let (mut block, _) = block.deconstruct();
block_modifier(&mut block);
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
let signed_block = block.sign(
&self.validator_keypairs[proposer_index as usize].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
);
(signed_block, state)
}
pub fn make_deposits<'a>(
&self,
state: &'a mut BeaconState<E>,
num_deposits: usize,
invalid_pubkey: Option<PublicKeyBytes>,
invalid_signature: Option<SignatureBytes>,
) -> (Vec<Deposit>, &'a mut BeaconState<E>) {
let mut datas = vec![];
for _ in 0..num_deposits {
let keypair = Keypair::random();
let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone());
let mut data = DepositData {
pubkey: pubkeybytes,
withdrawal_credentials: Hash256::from_slice(
&get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte)
[..],
),
amount: self.spec.min_deposit_amount,
signature: SignatureBytes::empty(),
};
data.signature = data.create_signature(&keypair.sk, &self.spec);
if let Some(invalid_pubkey) = invalid_pubkey {
data.pubkey = invalid_pubkey;
}
if let Some(invalid_signature) = invalid_signature.clone() {
data.signature = invalid_signature;
}
datas.push(data);
}
// Vector containing all leaves
let leaves = datas
.iter()
.map(|data| data.tree_hash_root())
.collect::<Vec<_>>();
// Building a VarList from leaves
let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone());
// Setting the deposit_root to be the tree_hash_root of the VarList
state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root();
state.eth1_data_mut().deposit_count = num_deposits as u64;
*state.eth1_deposit_index_mut() = 0;
// Building the merkle tree used for generating proofs
let tree = MerkleTree::create(&leaves[..], self.spec.deposit_contract_tree_depth as usize);
// Building proofs
let mut proofs = vec![];
for i in 0..leaves.len() {
let (_, mut proof) =
tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize);
proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64)));
proofs.push(proof);
}
// Building deposits
let deposits = datas
.into_par_iter()
.zip(proofs.into_par_iter())
.map(|(data, proof)| (data, proof.into()))
.map(|(data, proof)| Deposit { proof, data })
.collect::<Vec<_>>();
// Pushing deposits to block body
(deposits, state)
}
pub fn process_block(
&self,
slot: Slot,
@ -771,13 +1045,8 @@ where
block: &SignedBeaconBlock<E>,
validators: &[usize],
) {
let attestations = self.make_attestations(
validators,
&state,
state_root,
block_hash,
block.message.slot,
);
let attestations =
self.make_attestations(validators, &state, state_root, block_hash, block.slot());
self.process_attestations(attestations);
}
@ -932,7 +1201,7 @@ where
chain_dump
.iter()
.cloned()
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into())
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
.filter(|block_hash| *block_hash != Hash256::zero().into())
.collect()
}

View File

@ -14,7 +14,7 @@ use std::marker::PhantomData;
use std::str::Utf8Error;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use types::{
AttestationData, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Epoch, EthSpec,
AttestationData, AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec,
Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, Slot,
VoluntaryExit,
};
@ -237,7 +237,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState<T>) {
// Add any new validator indices.
state
.validators
.validators()
.iter()
.enumerate()
.skip(self.indices.len())
@ -255,7 +255,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
let i = i as usize;
let id = &monitored_validator.id;
if let Some(balance) = state.balances.get(i) {
if let Some(balance) = state.balances().get(i) {
metrics::set_int_gauge(
&metrics::VALIDATOR_MONITOR_BALANCE_GWEI,
&[id],
@ -263,7 +263,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
);
}
if let Some(validator) = state.validators.get(i) {
if let Some(validator) = state.validators().get(i) {
metrics::set_int_gauge(
&metrics::VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI,
&[id],
@ -473,7 +473,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
pub fn register_gossip_block<S: SlotClock>(
&self,
seen_timestamp: Duration,
block: &BeaconBlock<T>,
block: BeaconBlockRef<'_, T>,
block_root: Hash256,
slot_clock: &S,
) {
@ -484,7 +484,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
pub fn register_api_block<S: SlotClock>(
&self,
seen_timestamp: Duration,
block: &BeaconBlock<T>,
block: BeaconBlockRef<'_, T>,
block_root: Hash256,
slot_clock: &S,
) {
@ -495,11 +495,11 @@ impl<T: EthSpec> ValidatorMonitor<T> {
&self,
src: &str,
seen_timestamp: Duration,
block: &BeaconBlock<T>,
block: BeaconBlockRef<'_, T>,
block_root: Hash256,
slot_clock: &S,
) {
if let Some(id) = self.get_validator_id(block.proposer_index) {
if let Some(id) = self.get_validator_id(block.proposer_index()) {
let delay = get_block_delay_ms(seen_timestamp, block, slot_clock);
metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]);
@ -514,7 +514,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
"Block from API";
"root" => ?block_root,
"delay" => %delay.as_millis(),
"slot" => %block.slot,
"slot" => %block.slot(),
"src" => src,
"validator" => %id,
);
@ -741,11 +741,11 @@ impl<T: EthSpec> ValidatorMonitor<T> {
pub fn register_attestation_in_block(
&self,
indexed_attestation: &IndexedAttestation<T>,
block: &BeaconBlock<T>,
block: BeaconBlockRef<'_, T>,
spec: &ChainSpec,
) {
let data = &indexed_attestation.data;
let delay = (block.slot - data.slot) - spec.min_attestation_inclusion_delay;
let delay = (block.slot() - data.slot) - spec.min_attestation_inclusion_delay;
let epoch = data.slot.epoch(T::slots_per_epoch());
indexed_attestation.attesting_indices.iter().for_each(|i| {
@ -1043,10 +1043,10 @@ fn u64_to_i64(n: impl Into<u64>) -> i64 {
/// Returns the delay between the start of `block.slot` and `seen_timestamp`.
pub fn get_block_delay_ms<T: EthSpec, S: SlotClock>(
seen_timestamp: Duration,
block: &BeaconBlock<T>,
block: BeaconBlockRef<'_, T>,
slot_clock: &S,
) -> Duration {
get_slot_delay_ms::<S>(seen_timestamp, block.slot, slot_clock)
get_slot_delay_ms::<S>(seen_timestamp, block.slot(), slot_clock)
}
/// Returns the delay between the start of `slot` and `seen_timestamp`.

View File

@ -110,9 +110,9 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
&mut self,
state: &BeaconState<T::EthSpec>,
) -> Result<(), BeaconChainError> {
if state.validators.len() > self.pubkeys.len() {
if state.validators().len() > self.pubkeys.len() {
self.import(
state.validators[self.pubkeys.len()..]
state.validators()[self.pubkeys.len()..]
.iter()
.map(|v| v.pubkey),
)
@ -316,23 +316,28 @@ fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Res
#[cfg(test)]
mod test {
use super::*;
use crate::test_utils::{test_logger, EphemeralHarnessType};
use crate::test_utils::{test_logger, BeaconChainHarness, EphemeralHarnessType};
use std::sync::Arc;
use store::HotColdDB;
use store::{HotColdDB, StoreConfig};
use tempfile::tempdir;
use types::{
test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder},
BeaconState, EthSpec, Keypair, MainnetEthSpec,
test_utils::generate_deterministic_keypair, BeaconState, EthSpec, Keypair, MainnetEthSpec,
};
type E = MainnetEthSpec;
type T = EphemeralHarnessType<E>;
fn get_state(validator_count: usize) -> (BeaconState<E>, Vec<Keypair>) {
let spec = E::default_spec();
let builder =
TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &spec);
builder.build()
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
None,
types::test_utils::generate_deterministic_keypairs(validator_count),
StoreConfig::default(),
);
harness.advance_slot();
(harness.get_current_state(), harness.validator_keypairs)
}
fn get_store() -> BeaconStore<T> {

View File

@ -3,10 +3,8 @@
#[macro_use]
extern crate lazy_static;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
StateSkipConfig, WhenSlotSkipped,
};
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
use store::config::StoreConfig;
use tree_hash::TreeHash;
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
@ -29,6 +27,7 @@ fn produces_attestations() {
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
None,
KEYPAIRS[..].to_vec(),
StoreConfig::default(),
);
@ -63,12 +62,12 @@ fn produces_attestations() {
.block_at_slot(block_slot, WhenSlotSkipped::Prev)
.expect("should get block")
.expect("block should not be skipped");
let block_root = block.message.tree_hash_root();
let block_root = block.message().tree_hash_root();
let epoch_boundary_slot = state
.current_epoch()
.start_slot(MainnetEthSpec::slots_per_epoch());
let target_root = if state.slot == epoch_boundary_slot {
let target_root = if state.slot() == epoch_boundary_slot {
block_root
} else {
*state
@ -116,11 +115,13 @@ fn produces_attestations() {
assert_eq!(data.slot, slot, "bad slot");
assert_eq!(data.beacon_block_root, block_root, "bad block root");
assert_eq!(
data.source, state.current_justified_checkpoint,
data.source,
state.current_justified_checkpoint(),
"bad source"
);
assert_eq!(
data.source, state.current_justified_checkpoint,
data.source,
state.current_justified_checkpoint(),
"bad source"
);
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");

View File

@ -17,7 +17,7 @@ use tree_hash::TreeHash;
use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, BeaconStateError,
BitList, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof,
SignedAggregateAndProof, SignedBeaconBlock, SubnetId, Unsigned,
SignedAggregateAndProof, SubnetId, Unsigned,
};
pub type E = MainnetEthSpec;
@ -35,6 +35,7 @@ lazy_static! {
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_target_aggregators(
MainnetEthSpec,
None,
KEYPAIRS[0..validator_count].to_vec(),
// A kind-of arbitrary number that ensures that _some_ validators are aggregators, but
// not all.
@ -75,7 +76,7 @@ fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
.sign(
&validator_sk,
validator_committee_index,
&head.beacon_state.fork,
&head.beacon_state.fork(),
chain.genesis_validators_root,
&chain.spec,
)
@ -120,7 +121,7 @@ fn get_valid_aggregated_attestation<T: BeaconChainTypes>(
let proof = SelectionProof::new::<T::EthSpec>(
aggregate.data.slot,
&aggregator_sk,
&state.fork,
&state.fork(),
chain.genesis_validators_root,
&chain.spec,
);
@ -138,7 +139,7 @@ fn get_valid_aggregated_attestation<T: BeaconChainTypes>(
aggregate,
None,
&aggregator_sk,
&state.fork,
&state.fork(),
chain.genesis_validators_root,
&chain.spec,
);
@ -169,7 +170,7 @@ fn get_non_aggregator<T: BeaconChainTypes>(
let proof = SelectionProof::new::<T::EthSpec>(
aggregate.data.slot,
&aggregator_sk,
&state.fork,
&state.fork(),
chain.genesis_validators_root,
&chain.spec,
);
@ -922,7 +923,7 @@ fn attestation_that_skips_epochs() {
.expect("should not error getting state")
.expect("should find state");
while state.slot < current_slot {
while state.slot() < current_slot {
per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
}
@ -946,11 +947,11 @@ fn attestation_that_skips_epochs() {
let block_slot = harness
.chain
.store
.get_item::<SignedBeaconBlock<E>>(&block_root)
.get_block(&block_root)
.expect("should not error getting block")
.expect("should find attestation block")
.message
.slot;
.message()
.slot();
assert!(
attestation.data.slot - block_slot > E::slots_per_epoch() * 2,

View File

@ -3,20 +3,19 @@
#[macro_use]
extern crate lazy_static;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
BeaconSnapshot, BlockError,
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::{BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult};
use slasher::{Config as SlasherConfig, Slasher};
use state_processing::{
per_block_processing::{per_block_processing, BlockSignatureStrategy},
per_slot_processing, BlockProcessingError,
};
use std::sync::Arc;
use store::config::StoreConfig;
use tempfile::tempdir;
use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData,
AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256,
IndexedAttestation, Keypair, MainnetEthSpec, ProposerSlashing, Signature, SignedBeaconBlock,
SignedBeaconBlockHeader, SignedVoluntaryExit, Slot, VoluntaryExit, DEPOSIT_TREE_DEPTH,
};
use types::{test_utils::generate_deterministic_keypair, *};
type E = MainnetEthSpec;
@ -54,6 +53,7 @@ fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
None,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);
@ -98,10 +98,11 @@ fn update_proposal_signatures(
.get(proposer_index)
.expect("proposer keypair should be available");
snapshot.beacon_block = snapshot.beacon_block.message.clone().sign(
let (block, _) = snapshot.beacon_block.clone().deconstruct();
snapshot.beacon_block = block.sign(
&keypair.sk,
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
spec,
);
}
@ -111,7 +112,9 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
for i in 0..snapshots.len() {
let root = snapshots[i].beacon_block.canonical_root();
if let Some(child) = snapshots.get_mut(i + 1) {
child.beacon_block.message.parent_root = root
let (mut block, signature) = child.beacon_block.clone().deconstruct();
*block.parent_root_mut() = root;
child.beacon_block = SignedBeaconBlock::from_block(block, signature)
}
}
}
@ -168,10 +171,7 @@ fn chain_segment_varying_chunk_size() {
.chain
.process_chain_segment(chunk.to_vec())
.into_block_error()
.expect(&format!(
"should import chain segment of len {}",
chunk_size
));
.unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
}
harness.chain.fork_choice().expect("should run fork choice");
@ -206,7 +206,7 @@ fn chain_segment_non_linear_parent_roots() {
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::NonLinearParentRoots)
),
@ -217,13 +217,15 @@ fn chain_segment_non_linear_parent_roots() {
* Test with a modified parent root.
*/
let mut blocks = chain_segment_blocks();
blocks[3].message.parent_root = Hash256::zero();
let (mut block, signature) = blocks[3].clone().deconstruct();
*block.parent_root_mut() = Hash256::zero();
blocks[3] = SignedBeaconBlock::from_block(block, signature);
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::NonLinearParentRoots)
),
@ -244,13 +246,15 @@ fn chain_segment_non_linear_slots() {
*/
let mut blocks = chain_segment_blocks();
blocks[3].message.slot = Slot::new(0);
let (mut block, signature) = blocks[3].clone().deconstruct();
*block.slot_mut() = Slot::new(0);
blocks[3] = SignedBeaconBlock::from_block(block, signature);
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::NonLinearSlots)
),
@ -262,13 +266,15 @@ fn chain_segment_non_linear_slots() {
*/
let mut blocks = chain_segment_blocks();
blocks[3].message.slot = blocks[2].message.slot;
let (mut block, signature) = blocks[3].clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
blocks[3] = SignedBeaconBlock::from_block(block, signature);
assert!(
matches!(
harness
.chain
.process_chain_segment(blocks.clone())
.process_chain_segment(blocks)
.into_block_error(),
Err(BlockError::NonLinearSlots)
),
@ -342,7 +348,9 @@ fn invalid_signature_gossip_block() {
// Ensure the block will be rejected if imported on its own (without gossip checking).
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
snapshots[block_index].beacon_block.signature = junk_signature();
let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct();
snapshots[block_index].beacon_block =
SignedBeaconBlock::from_block(block.clone(), junk_signature());
// Import all the ancestors before the `block_index` block.
let ancestor_blocks = CHAIN_SEGMENT
.iter()
@ -358,7 +366,7 @@ fn invalid_signature_gossip_block() {
matches!(
harness
.chain
.process_block(snapshots[block_index].beacon_block.clone()),
.process_block(SignedBeaconBlock::from_block(block, junk_signature())),
Err(BlockError::InvalidSignature)
),
"should not import individual block with an invalid gossip signature",
@ -371,7 +379,9 @@ fn invalid_signature_block_proposal() {
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
snapshots[block_index].beacon_block.signature = junk_signature();
let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct();
snapshots[block_index].beacon_block =
SignedBeaconBlock::from_block(block.clone(), junk_signature());
let blocks = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
@ -395,11 +405,9 @@ fn invalid_signature_randao_reveal() {
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
snapshots[block_index]
.beacon_block
.message
.body
.randao_reveal = junk_signature();
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
*block.body_mut().randao_reveal_mut() = junk_signature();
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&harness, block_index, &snapshots, "randao");
@ -411,23 +419,23 @@ fn invalid_signature_proposer_slashing() {
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
let proposer_slashing = ProposerSlashing {
signed_header_1: SignedBeaconBlockHeader {
message: snapshots[block_index].beacon_block.message.block_header(),
message: block.block_header(),
signature: junk_signature(),
},
signed_header_2: SignedBeaconBlockHeader {
message: snapshots[block_index].beacon_block.message.block_header(),
message: block.block_header(),
signature: junk_signature(),
},
};
snapshots[block_index]
.beacon_block
.message
.body
.proposer_slashings
block
.body_mut()
.proposer_slashings_mut()
.push(proposer_slashing)
.expect("should update proposer slashing");
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing");
@ -460,13 +468,13 @@ fn invalid_signature_attester_slashing() {
attestation_1: indexed_attestation.clone(),
attestation_2: indexed_attestation,
};
snapshots[block_index]
.beacon_block
.message
.body
.attester_slashings
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
block
.body_mut()
.attester_slashings_mut()
.push(attester_slashing)
.expect("should update attester slashing");
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing");
@ -480,14 +488,10 @@ fn invalid_signature_attestation() {
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
if let Some(attestation) = snapshots[block_index]
.beacon_block
.message
.body
.attestations
.get_mut(0)
{
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) {
attestation.signature = junk_aggregate_signature();
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&harness, block_index, &snapshots, "attestation");
@ -516,13 +520,13 @@ fn invalid_signature_deposit() {
signature: junk_signature().into(),
},
};
snapshots[block_index]
.beacon_block
.message
.body
.deposits
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
block
.body_mut()
.deposits_mut()
.push(deposit)
.expect("should update deposit");
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
let blocks = snapshots
@ -548,11 +552,10 @@ fn invalid_signature_exit() {
let harness = get_invalid_sigs_harness();
let mut snapshots = CHAIN_SEGMENT.clone();
let epoch = snapshots[block_index].beacon_state.current_epoch();
snapshots[block_index]
.beacon_block
.message
.body
.voluntary_exits
let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct();
block
.body_mut()
.voluntary_exits_mut()
.push(SignedVoluntaryExit {
message: VoluntaryExit {
epoch,
@ -561,6 +564,7 @@ fn invalid_signature_exit() {
signature: junk_signature(),
})
.expect("should update deposit");
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature);
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit");
@ -608,12 +612,15 @@ fn block_gossip_verification() {
* future blocks for processing at the appropriate slot).
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let expected_block_slot = block.message.slot + 1;
block.message.slot = expected_block_slot;
let (mut block, signature) = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct();
let expected_block_slot = block.slot() + 1;
*block.slot_mut() = expected_block_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))),
BlockError::FutureSlot {
present_slot,
block_slot,
@ -635,7 +642,10 @@ fn block_gossip_verification() {
* nodes, etc).
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let (mut block, signature) = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct();
let expected_finalized_slot = harness
.chain
.head_info()
@ -643,10 +653,10 @@ fn block_gossip_verification() {
.finalized_checkpoint
.epoch
.start_slot(E::slots_per_epoch());
block.message.slot = expected_finalized_slot;
*block.slot_mut() = expected_finalized_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))),
BlockError::WouldRevertFinalizedSlot {
block_slot,
finalized_slot,
@ -665,11 +675,21 @@ fn block_gossip_verification() {
* proposer_index pubkey.
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
block.signature = junk_signature();
let block = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct()
.0;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
unwrap_err(
harness
.chain
.verify_block_for_gossip(SignedBeaconBlock::from_block(
block,
junk_signature()
))
),
BlockError::ProposalSignatureInvalid
),
"should not import a block with an invalid proposal signature"
@ -683,12 +703,15 @@ fn block_gossip_verification() {
* The block's parent (defined by block.parent_root) passes validation.
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let (mut block, signature) = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct();
let parent_root = Hash256::from_low_u64_be(42);
block.message.parent_root = parent_root;
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))),
BlockError::ParentUnknown(block)
if block.parent_root() == parent_root
),
@ -705,12 +728,15 @@ fn block_gossip_verification() {
* store.finalized_checkpoint.root
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let (mut block, signature) = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct();
let parent_root = CHAIN_SEGMENT[0].beacon_block_root;
block.message.parent_root = parent_root;
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(block)),
unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))),
BlockError::NotFinalizedDescendant { block_parent_root }
if block_parent_root == parent_root
),
@ -728,14 +754,18 @@ fn block_gossip_verification() {
* processing while proposers for the block's branch are calculated.
*/
let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone();
let expected_proposer = block.message.proposer_index;
let mut block = CHAIN_SEGMENT[block_index]
.beacon_block
.clone()
.deconstruct()
.0;
let expected_proposer = block.proposer_index();
let other_proposer = (0..VALIDATOR_COUNT as u64)
.into_iter()
.find(|i| *i != block.message.proposer_index)
.find(|i| *i != block.proposer_index())
.expect("there must be more than one validator in this test");
block.message.proposer_index = other_proposer;
let block = block.message.clone().sign(
*block.proposer_index_mut() = other_proposer;
let block = block.sign(
&generate_deterministic_keypair(other_proposer as usize).sk,
&harness.chain.head_info().unwrap().fork,
harness.chain.genesis_validators_root,
@ -760,7 +790,7 @@ fn block_gossip_verification() {
proposer,
slot,
}
if proposer == other_proposer && slot == block.message.slot
if proposer == other_proposer && slot == block.message().slot()
),
"should register any valid signature against the proposer, even if the block failed later verification"
);
@ -792,7 +822,7 @@ fn block_gossip_verification() {
proposer,
slot,
}
if proposer == block.message.proposer_index && slot == block.message.slot
if proposer == block.message().proposer_index() && slot == block.message().slot()
),
"the second proposal by this validator should be rejected"
);
@ -829,3 +859,245 @@ fn verify_block_for_gossip_slashing_detection() {
drop(slasher);
slasher_dir.close().unwrap();
}
#[test]
fn add_base_block_to_altair_chain() {
let mut spec = MainnetEthSpec::default_spec();
let slots_per_epoch = MainnetEthSpec::slots_per_epoch();
// The Altair fork happens at epoch 1.
spec.altair_fork_epoch = Some(Epoch::new(1));
let harness = BeaconChainHarness::new_with_chain_config(
MainnetEthSpec,
Some(spec),
KEYPAIRS[..].to_vec(),
1 << 32,
StoreConfig::default(),
ChainConfig::default(),
);
// Move out of the genesis slot.
harness.advance_slot();
// Build out all the blocks in epoch 0.
harness.extend_chain(
slots_per_epoch as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
// Move into the next empty slot.
harness.advance_slot();
// Produce an Altair block.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (altair_signed_block, _) = harness.make_block(state.clone(), slot);
let altair_block = &altair_signed_block
.as_altair()
.expect("test expects an altair block")
.message;
let altair_body = &altair_block.body;
// Create a Base-equivalent of `altair_block`.
let base_block = SignedBeaconBlock::Base(SignedBeaconBlockBase {
message: BeaconBlockBase {
slot: altair_block.slot,
proposer_index: altair_block.proposer_index,
parent_root: altair_block.parent_root,
state_root: altair_block.state_root,
body: BeaconBlockBodyBase {
randao_reveal: altair_body.randao_reveal.clone(),
eth1_data: altair_body.eth1_data.clone(),
graffiti: altair_body.graffiti,
proposer_slashings: altair_body.proposer_slashings.clone(),
attester_slashings: altair_body.attester_slashings.clone(),
attestations: altair_body.attestations.clone(),
deposits: altair_body.deposits.clone(),
voluntary_exits: altair_body.voluntary_exits.clone(),
},
},
signature: Signature::empty(),
});
// Ensure that it would be impossible to apply this block to `per_block_processing`.
{
let mut state = state;
per_slot_processing(&mut state, None, &harness.chain.spec).unwrap();
assert!(matches!(
per_block_processing(
&mut state,
&base_block,
None,
BlockSignatureStrategy::NoVerification,
&harness.chain.spec,
),
Err(BlockProcessingError::InconsistentBlockFork(
InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
}
))
));
}
// Ensure that it would be impossible to verify this block for gossip.
assert!(matches!(
harness
.chain
.verify_block_for_gossip(base_block.clone())
.err()
.expect("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
})
));
// Ensure that it would be impossible to import via `BeaconChain::process_block`.
assert!(matches!(
harness
.chain
.process_block(base_block.clone())
.err()
.expect("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
})
));
// Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`.
assert!(matches!(
harness.chain.process_chain_segment(vec![base_block]),
ChainSegmentResult::Failed {
imported_blocks: 0,
error: BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
})
}
));
}
#[test]
fn add_altair_block_to_base_chain() {
let mut spec = MainnetEthSpec::default_spec();
// Altair never happens.
spec.altair_fork_epoch = None;
let harness = BeaconChainHarness::new_with_chain_config(
MainnetEthSpec,
Some(spec),
KEYPAIRS[..].to_vec(),
1 << 32,
StoreConfig::default(),
ChainConfig::default(),
);
// Move out of the genesis slot.
harness.advance_slot();
// Build one block.
harness.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
// Move into the next empty slot.
harness.advance_slot();
// Produce an altair block.
let state = harness.get_current_state();
let slot = harness.get_current_slot();
let (base_signed_block, _) = harness.make_block(state.clone(), slot);
let base_block = &base_signed_block
.as_base()
.expect("test expects a base block")
.message;
let base_body = &base_block.body;
// Create an Altair-equivalent of `altair_block`.
let altair_block = SignedBeaconBlock::Altair(SignedBeaconBlockAltair {
message: BeaconBlockAltair {
slot: base_block.slot,
proposer_index: base_block.proposer_index,
parent_root: base_block.parent_root,
state_root: base_block.state_root,
body: BeaconBlockBodyAltair {
randao_reveal: base_body.randao_reveal.clone(),
eth1_data: base_body.eth1_data.clone(),
graffiti: base_body.graffiti,
proposer_slashings: base_body.proposer_slashings.clone(),
attester_slashings: base_body.attester_slashings.clone(),
attestations: base_body.attestations.clone(),
deposits: base_body.deposits.clone(),
voluntary_exits: base_body.voluntary_exits.clone(),
sync_aggregate: SyncAggregate::empty(),
},
},
signature: Signature::empty(),
});
// Ensure that it would be impossible to apply this block to `per_block_processing`.
{
let mut state = state;
per_slot_processing(&mut state, None, &harness.chain.spec).unwrap();
assert!(matches!(
per_block_processing(
&mut state,
&altair_block,
None,
BlockSignatureStrategy::NoVerification,
&harness.chain.spec,
),
Err(BlockProcessingError::InconsistentBlockFork(
InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
}
))
));
}
// Ensure that it would be impossible to verify this block for gossip.
assert!(matches!(
harness
.chain
.verify_block_for_gossip(altair_block.clone())
.err()
.expect("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
})
));
// Ensure that it would be impossible to import via `BeaconChain::process_block`.
assert!(matches!(
harness
.chain
.process_block(altair_block.clone())
.err()
.expect("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
})
));
// Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`.
assert!(matches!(
harness.chain.process_chain_segment(vec![altair_block]),
ChainSegmentResult::Failed {
imported_blocks: 0,
error: BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
})
}
));
}

View File

@ -7,16 +7,12 @@ extern crate lazy_static;
use beacon_chain::observed_operations::ObservationOutcome;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::test_utils::{
AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder,
TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder,
};
use types::*;
pub const VALIDATOR_COUNT: usize = 24;
@ -32,7 +28,7 @@ type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let spec = E::default_spec();
let spec = test_spec::<E>();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
@ -44,6 +40,7 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
None,
store,
KEYPAIRS[0..validator_count].to_vec(),
);
@ -64,21 +61,13 @@ fn voluntary_exit() {
AttestationStrategy::AllValidators,
);
let head_info = harness.chain.head_info().unwrap();
let make_exit = |validator_index: usize, exit_epoch: u64| {
TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build(
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;
let exit1 = make_exit(validator_index1, spec.shard_committee_period);
let exit1 = harness.make_voluntary_exit(
validator_index1 as u64,
Epoch::new(spec.shard_committee_period),
);
// First verification should show it to be fresh.
assert!(matches!(
@ -98,14 +87,20 @@ fn voluntary_exit() {
));
// A different exit for the same validator should also be detected as a duplicate.
let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1);
let exit2 = harness.make_voluntary_exit(
validator_index1 as u64,
Epoch::new(spec.shard_committee_period + 1),
);
assert!(matches!(
harness.chain.verify_voluntary_exit_for_gossip(exit2),
Ok(ObservationOutcome::AlreadyKnown)
));
// Exit for a different validator should be fine.
let exit3 = make_exit(validator_index2, spec.shard_committee_period);
let exit3 = harness.make_voluntary_exit(
validator_index2 as u64,
Epoch::new(spec.shard_committee_period),
);
assert!(matches!(
harness
.chain
@ -120,25 +115,11 @@ fn proposer_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let head_info = harness.chain.head_info().unwrap();
let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;
let make_slashing = |validator_index: usize| {
TestingProposerSlashingBuilder::double_vote::<E>(
ProposerSlashingTestTask::Valid,
validator_index as u64,
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
let slashing1 = make_slashing(validator_index1);
let slashing1 = harness.make_proposer_slashing(validator_index1 as u64);
// First slashing for this proposer should be allowed.
assert!(matches!(
@ -171,7 +152,7 @@ fn proposer_slashing() {
));
// Proposer slashing for a different index should be accepted
let slashing3 = make_slashing(validator_index2);
let slashing3 = harness.make_proposer_slashing(validator_index2 as u64);
assert!(matches!(
harness
.chain
@ -186,9 +167,6 @@ fn attester_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let head_info = harness.chain.head_info().unwrap();
// First third of the validators
let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::<Vec<_>>();
@ -199,25 +177,8 @@ fn attester_slashing() {
// Last half of the validators
let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
let signer = |idx: u64, message: &[u8]| {
KEYPAIRS[idx as usize]
.sk
.sign(Hash256::from_slice(&message))
};
let make_slashing = |validators| {
TestingAttesterSlashingBuilder::double_vote::<_, E>(
AttesterSlashingTestTask::Valid,
validators,
signer,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
// Slashing for first third of validators should be accepted.
let slashing1 = make_slashing(&first_third);
let slashing1 = harness.make_attester_slashing(first_third);
assert!(matches!(
harness
.chain
@ -227,7 +188,7 @@ fn attester_slashing() {
));
// Overlapping slashing for first half of validators should also be accepted.
let slashing2 = make_slashing(&first_half);
let slashing2 = harness.make_attester_slashing(first_half);
assert!(matches!(
harness
.chain
@ -253,7 +214,7 @@ fn attester_slashing() {
));
// Slashing for last half of validators should be accepted (distinct from all existing)
let slashing3 = make_slashing(&second_half);
let slashing3 = harness.make_attester_slashing(second_half);
assert!(matches!(
harness
.chain
@ -262,7 +223,7 @@ fn attester_slashing() {
ObservationOutcome::New(_)
));
// Slashing for last third (contained in last half) should be rejected.
let slashing4 = make_slashing(&last_third);
let slashing4 = harness.make_attester_slashing(last_third);
assert!(matches!(
harness
.chain

View File

@ -1,161 +0,0 @@
#![cfg(not(debug_assertions))]
#[macro_use]
extern crate lazy_static;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
BeaconChain, BeaconChainTypes,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{HotColdDB, LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::{EthSpec, Keypair, MinimalEthSpec};
type E = MinimalEthSpec;
// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24;
lazy_static! {
/// A cached set of keys.
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = E::default_spec();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
.expect("disk store should initialize")
}
#[test]
fn finalizes_after_resuming_from_db() {
let validator_count = 16;
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8;
let first_half = num_blocks_produced / 2;
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store.clone(),
KEYPAIRS[0..validator_count].to_vec(),
);
harness.advance_slot();
harness.extend_chain(
first_half as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
assert!(
harness
.chain
.head()
.expect("should read head")
.beacon_state
.finalized_checkpoint
.epoch
> 0,
"the chain should have already finalized"
);
let latest_slot = harness.chain.slot().expect("should have a slot");
harness
.chain
.persist_head_and_fork_choice()
.expect("should persist the head and fork choice");
harness
.chain
.persist_op_pool()
.expect("should persist the op pool");
harness
.chain
.persist_eth1_cache()
.expect("should persist the eth1 cache");
let data_dir = harness.data_dir;
let original_chain = harness.chain;
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
MinimalEthSpec,
store,
KEYPAIRS[0..validator_count].to_vec(),
data_dir,
);
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
// Set the slot clock of the resumed harness to be in the slot following the previous harness.
//
// This allows us to produce the block at the next slot.
resumed_harness
.chain
.slot_clock
.set_slot(latest_slot.as_u64() + 1);
resumed_harness.extend_chain(
(num_blocks_produced - first_half) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
let state = &resumed_harness
.chain
.head()
.expect("should read head")
.beacon_state;
assert_eq!(
state.slot, num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
state.current_epoch(),
num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint.epoch,
state.current_epoch() - 1,
"the head should be justified one behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint.epoch,
state.current_epoch() - 2,
"the head should be finalized two behind the current epoch"
);
}
/// Checks that two chains are the same, for the purpose of this tests.
///
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
assert_eq!(a.spec, b.spec, "spec should be equal");
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
assert_eq!(
a.head().unwrap(),
b.head().unwrap(),
"head() should be equal"
);
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
assert_eq!(
a.genesis_block_root, b.genesis_block_root,
"genesis_block_root should be equal"
);
let slot = a.slot().unwrap();
assert!(
a.fork_choice.write().get_head(slot).unwrap()
== b.fork_choice.write().get_head(slot).unwrap(),
"fork_choice heads should be equal"
);
}

View File

@ -2,9 +2,9 @@
use beacon_chain::attestation_verification::Error as AttnError;
use beacon_chain::test_utils::{
test_logger, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
test_logger, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use beacon_chain::BeaconSnapshot;
use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconSnapshot};
use lazy_static::lazy_static;
use maplit::hashset;
use rand::Rng;
@ -34,7 +34,7 @@ type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = MinimalEthSpec::default_spec();
let spec = test_spec::<E>();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
@ -50,6 +50,7 @@ fn get_harness(
) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
None,
store,
KEYPAIRS[0..validator_count].to_vec(),
);
@ -107,7 +108,11 @@ fn randomised_skips() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(state.slot, num_slots, "head should be at the current slot");
assert_eq!(
state.slot(),
num_slots,
"head should be at the current slot"
);
check_split_slot(&harness, store);
check_chain_dump(&harness, num_blocks_produced + 1);
@ -195,7 +200,7 @@ fn randao_genesis_storage() {
.head()
.expect("should get head")
.beacon_state
.randao_mixes
.randao_mixes()
.iter()
.find(|x| **x == genesis_value)
.is_some());
@ -212,7 +217,7 @@ fn randao_genesis_storage() {
.head()
.expect("should get head")
.beacon_state
.randao_mixes
.randao_mixes()
.iter()
.find(|x| **x == genesis_value)
.is_none());
@ -347,8 +352,12 @@ fn delete_blocks_and_states() {
let store = get_store(&db_path);
let validators_keypairs =
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
let harness =
BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store.clone(), validators_keypairs);
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
None,
store.clone(),
validators_keypairs,
);
let unforked_blocks: u64 = 4 * E::slots_per_epoch();
@ -471,7 +480,7 @@ fn multi_epoch_fork_valid_blocks_test(
let validators_keypairs =
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
let harness =
BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store, validators_keypairs);
BeaconChainHarness::new_with_disk_store(MinimalEthSpec, None, store, validators_keypairs);
let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap();
let num_fork2_blocks: u64 = num_fork2_blocks_.try_into().unwrap();
@ -550,18 +559,21 @@ fn multiple_attestations_per_block() {
let head = harness.chain.head().unwrap();
let committees_per_slot = head
.beacon_state
.get_committee_count_at_slot(head.beacon_state.slot)
.get_committee_count_at_slot(head.beacon_state.slot())
.unwrap();
assert!(committees_per_slot > 1);
for snapshot in harness.chain.chain_dump().unwrap() {
let slot = snapshot.beacon_block.slot();
assert_eq!(
snapshot.beacon_block.message.body.attestations.len() as u64,
if snapshot.beacon_block.slot() <= 1 {
0
} else {
committees_per_slot
}
snapshot
.beacon_block
.deconstruct()
.0
.body()
.attestations()
.len() as u64,
if slot <= 1 { 0 } else { committees_per_slot }
);
}
}
@ -758,7 +770,7 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let slots_per_epoch = rig.slots_per_epoch();
let (mut state, state_root) = rig.get_current_state_and_root();
@ -863,7 +875,7 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let slots_per_epoch = rig.slots_per_epoch();
let (state, state_root) = rig.get_current_state_and_root();
@ -988,7 +1000,7 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let slots_per_epoch = rig.slots_per_epoch();
let (mut state, state_root) = rig.get_current_state_and_root();
@ -1078,7 +1090,7 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let (state, state_root) = rig.get_current_state_and_root();
// Fill up 0th epoch with canonical chain blocks
@ -1216,7 +1228,7 @@ fn prunes_skipped_slots_states() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let (state, state_root) = rig.get_current_state_and_root();
let canonical_slots_zeroth_epoch: Vec<Slot> =
@ -1335,7 +1347,7 @@ fn finalizes_non_epoch_start_slot() {
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
let (state, state_root) = rig.get_current_state_and_root();
let canonical_slots_zeroth_epoch: Vec<Slot> =
@ -1691,15 +1703,17 @@ fn garbage_collect_temp_states_from_failed_block() {
let genesis_state = harness.get_current_state();
let block_slot = Slot::new(2 * slots_per_epoch);
let (mut block, state) = harness.make_block(genesis_state, block_slot);
let (signed_block, state) = harness.make_block(genesis_state, block_slot);
let (mut block, _) = signed_block.deconstruct();
// Mutate the block to make it invalid, and re-sign it.
block.message.state_root = Hash256::repeat_byte(0xff);
let proposer_index = block.message.proposer_index as usize;
let block = block.message.sign(
*block.state_root_mut() = Hash256::repeat_byte(0xff);
let proposer_index = block.proposer_index() as usize;
let block = block.sign(
&harness.validator_keypairs[proposer_index].sk,
&state.fork,
state.genesis_validators_root,
&state.fork(),
state.genesis_validators_root(),
&harness.spec,
);
@ -1720,12 +1734,143 @@ fn garbage_collect_temp_states_from_failed_block() {
assert_eq!(store.iter_temporary_state_roots().count(), 0);
}
#[test]
fn finalizes_after_resuming_from_db() {
let validator_count = 16;
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8;
let first_half = num_blocks_produced / 2;
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
None,
store.clone(),
KEYPAIRS[0..validator_count].to_vec(),
);
harness.advance_slot();
harness.extend_chain(
first_half as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
assert!(
harness
.chain
.head()
.expect("should read head")
.beacon_state
.finalized_checkpoint()
.epoch
> 0,
"the chain should have already finalized"
);
let latest_slot = harness.chain.slot().expect("should have a slot");
harness
.chain
.persist_head_and_fork_choice()
.expect("should persist the head and fork choice");
harness
.chain
.persist_op_pool()
.expect("should persist the op pool");
harness
.chain
.persist_eth1_cache()
.expect("should persist the eth1 cache");
let data_dir = harness.data_dir;
let original_chain = harness.chain;
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
MinimalEthSpec,
None,
store,
KEYPAIRS[0..validator_count].to_vec(),
data_dir,
);
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
// Set the slot clock of the resumed harness to be in the slot following the previous harness.
//
// This allows us to produce the block at the next slot.
resumed_harness
.chain
.slot_clock
.set_slot(latest_slot.as_u64() + 1);
resumed_harness.extend_chain(
(num_blocks_produced - first_half) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
let state = &resumed_harness
.chain
.head()
.expect("should read head")
.beacon_state;
assert_eq!(
state.slot(),
num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
state.current_epoch(),
num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint().epoch,
state.current_epoch() - 1,
"the head should be justified one behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint().epoch,
state.current_epoch() - 2,
"the head should be finalized two behind the current epoch"
);
}
/// Checks that two chains are the same, for the purpose of these tests.
///
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
assert_eq!(a.spec, b.spec, "spec should be equal");
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
assert_eq!(
a.head().unwrap(),
b.head().unwrap(),
"head() should be equal"
);
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
assert_eq!(
a.genesis_block_root, b.genesis_block_root,
"genesis_block_root should be equal"
);
let slot = a.slot().unwrap();
assert!(
a.fork_choice.write().get_head(slot).unwrap()
== b.fork_choice.write().get_head(slot).unwrap(),
"fork_choice heads should be equal"
);
}
/// Check that the head state's slot matches `expected_slot`.
fn check_slot(harness: &TestHarness, expected_slot: u64) {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot, expected_slot,
state.slot(),
expected_slot,
"head should be at the current slot"
);
}
@ -1737,12 +1882,12 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
check_slot(harness, expected_slot);
assert_eq!(
state.current_justified_checkpoint.epoch,
state.current_justified_checkpoint().epoch,
state.current_epoch() - 1,
"the head should be justified one behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint.epoch,
state.finalized_checkpoint().epoch,
state.current_epoch() - 2,
"the head should be finalized two behind the current epoch"
);
@ -1757,7 +1902,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, L
.head()
.expect("should get head")
.beacon_state
.finalized_checkpoint
.finalized_checkpoint()
.epoch
.start_slot(E::slots_per_epoch()),
split_slot
@ -1788,8 +1933,8 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
.get_state(&checkpoint.beacon_state_root(), None)
.expect("no error")
.expect("state exists")
.slot,
checkpoint.beacon_state.slot
.slot(),
checkpoint.beacon_state.slot()
);
}
@ -1864,7 +2009,7 @@ fn get_finalized_epoch_boundary_blocks(
) -> HashSet<SignedBeaconBlockHash> {
dump.iter()
.cloned()
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into())
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
.collect()
}

View File

@ -29,6 +29,7 @@ lazy_static! {
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
None,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);
@ -41,7 +42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
#[test]
fn massive_skips() {
let harness = get_harness(8);
let spec = &MinimalEthSpec::default_spec();
let spec = &harness.chain.spec;
let mut state = harness.chain.head().expect("should get head").beacon_state;
// Run per_slot_processing until it returns an error.
@ -52,7 +53,7 @@ fn massive_skips() {
}
};
assert!(state.slot > 1, "the state should skip at least one slot");
assert!(state.slot() > 1, "the state should skip at least one slot");
assert_eq!(
error,
SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError(
@ -134,7 +135,7 @@ fn iterators() {
assert_eq!(
*state_roots.last().expect("should have some state roots"),
(head.beacon_state_root(), head.beacon_state.slot),
(head.beacon_state_root(), head.beacon_state.slot()),
"last state root and slot should be for the head state"
);
}
@ -153,7 +154,7 @@ fn find_reorgs() {
);
let head_state = harness.chain.head_beacon_state().unwrap();
let head_slot = head_state.slot;
let head_slot = head_state.slot();
let genesis_state = harness
.chain
.state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
@ -167,7 +168,7 @@ fn find_reorgs() {
.find_reorg_slot(&genesis_state, harness.chain.genesis_block_root)
.unwrap(),
head_state
.finalized_checkpoint
.finalized_checkpoint()
.epoch
.start_slot(MinimalEthSpec::slots_per_epoch())
);
@ -237,7 +238,7 @@ fn chooses_fork() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot,
state.slot(),
Slot::from(initial_blocks + honest_fork_blocks),
"head should be at the current slot"
);
@ -268,7 +269,8 @@ fn finalizes_with_full_participation() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot, num_blocks_produced,
state.slot(),
num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
@ -277,12 +279,12 @@ fn finalizes_with_full_participation() {
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint.epoch,
state.current_justified_checkpoint().epoch,
state.current_epoch() - 1,
"the head should be justified one behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint.epoch,
state.finalized_checkpoint().epoch,
state.current_epoch() - 2,
"the head should be finalized two behind the current epoch"
);
@ -306,7 +308,8 @@ fn finalizes_with_two_thirds_participation() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot, num_blocks_produced,
state.slot(),
num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
@ -320,12 +323,12 @@ fn finalizes_with_two_thirds_participation() {
// included in blocks during that epoch.
assert_eq!(
state.current_justified_checkpoint.epoch,
state.current_justified_checkpoint().epoch,
state.current_epoch() - 2,
"the head should be justified two behind the current epoch"
);
assert_eq!(
state.finalized_checkpoint.epoch,
state.finalized_checkpoint().epoch,
state.current_epoch() - 4,
"the head should be finalized three behind the current epoch"
);
@ -350,7 +353,8 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot, num_blocks_produced,
state.slot(),
num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
@ -359,11 +363,13 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint.epoch, 0,
state.current_justified_checkpoint().epoch,
0,
"no epoch should have been justified"
);
assert_eq!(
state.finalized_checkpoint.epoch, 0,
state.finalized_checkpoint().epoch,
0,
"no epoch should have been finalized"
);
}
@ -383,7 +389,8 @@ fn does_not_finalize_without_attestation() {
let state = &harness.chain.head().expect("should get head").beacon_state;
assert_eq!(
state.slot, num_blocks_produced,
state.slot(),
num_blocks_produced,
"head should be at the current slot"
);
assert_eq!(
@ -392,11 +399,13 @@ fn does_not_finalize_without_attestation() {
"head should be at the expected epoch"
);
assert_eq!(
state.current_justified_checkpoint.epoch, 0,
state.current_justified_checkpoint().epoch,
0,
"no epoch should have been justified"
);
assert_eq!(
state.finalized_checkpoint.epoch, 0,
state.finalized_checkpoint().epoch,
0,
"no epoch should have been finalized"
);
}
@ -681,7 +690,14 @@ fn block_roots_skip_slot_behaviour() {
let harness = get_harness(VALIDATOR_COUNT);
// Test should be longer than the block roots to ensure a DB lookup is triggered.
let chain_length = harness.chain.head().unwrap().beacon_state.block_roots.len() as u64 * 3;
let chain_length = harness
.chain
.head()
.unwrap()
.beacon_state
.block_roots()
.len() as u64
* 3;
let skipped_slots = [1, 6, 7, 10, chain_length];

View File

@ -19,7 +19,6 @@ use network::{NetworkConfig, NetworkMessage, NetworkService};
use slasher::Slasher;
use slasher_service::SlasherService;
use slog::{debug, info, warn};
use ssz::Decode;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@ -196,7 +195,7 @@ where
"Starting from known genesis state";
);
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes)
let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
.map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
builder.genesis_state(genesis_state).map(|v| (v, None))?

View File

@ -13,7 +13,7 @@ use std::io::ErrorKind;
use std::io::{Read, Write};
use std::marker::PhantomData;
use tokio_util::codec::{Decoder, Encoder};
use types::{EthSpec, SignedBeaconBlock};
use types::{EthSpec, SignedBeaconBlock, SignedBeaconBlockBase};
use unsigned_varint::codec::Uvi;
/* Inbound Codec */
@ -298,12 +298,18 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
Protocol::Goodbye => Err(RPCError::InvalidData),
Protocol::BlocksByRange => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
// FIXME(altair): support Altair blocks
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(
&decoded_buffer,
)?),
)))),
},
Protocol::BlocksByRoot => match self.protocol.version {
// FIXME(altair): support Altair blocks
Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(
&decoded_buffer,
)?),
)))),
},
Protocol::Ping => match self.protocol.version {

View File

@ -354,10 +354,10 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
match self {
RPCResponse::Status(status) => write!(f, "{}", status),
RPCResponse::BlocksByRange(block) => {
write!(f, "BlocksByRange: Block slot: {}", block.message.slot)
write!(f, "BlocksByRange: Block slot: {}", block.slot())
}
RPCResponse::BlocksByRoot(block) => {
write!(f, "BlocksByRoot: BLock slot: {}", block.message.slot)
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
}
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number),

View File

@ -24,16 +24,16 @@ use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeac
lazy_static! {
// Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is
// same across different `EthSpec` implementations.
pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec> {
message: BeaconBlock::empty(&MainnetEthSpec::default_spec()),
signature: Signature::empty(),
}
pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::empty(&MainnetEthSpec::default_spec()),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec> {
message: BeaconBlock::full(&MainnetEthSpec::default_spec()),
signature: Signature::empty(),
}
pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block(
BeaconBlock::full(&MainnetEthSpec::default_spec()),
Signature::empty(),
)
.as_ssz_bytes()
.len();
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize =

View File

@ -10,7 +10,7 @@ use std::io::{Error, ErrorKind};
use types::SubnetId;
use types::{
Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof,
SignedBeaconBlock, SignedVoluntaryExit,
SignedBeaconBlock, SignedBeaconBlockBase, SignedVoluntaryExit,
};
#[derive(Debug, Clone, PartialEq)]
@ -141,8 +141,11 @@ impl<T: EthSpec> PubsubMessage<T> {
))))
}
GossipKind::BeaconBlock => {
let beacon_block = SignedBeaconBlock::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
// FIXME(altair): support Altair blocks
let beacon_block = SignedBeaconBlock::Base(
SignedBeaconBlockBase::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
);
Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block)))
}
GossipKind::VoluntaryExit => {
@ -189,7 +192,8 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
PubsubMessage::BeaconBlock(block) => write!(
f,
"Beacon Block: slot: {}, proposer_index: {}",
block.message.slot, block.message.proposer_index
block.slot(),
block.message().proposer_index()
),
PubsubMessage::AggregateAndProofAttestation(att) => write!(
f,

View File

@ -140,10 +140,7 @@ fn test_blocks_by_range_chunked_rpc() {
// BlocksByRange Response
let spec = E::default_spec();
let empty_block = BeaconBlock::empty(&spec);
let empty_signed = SignedBeaconBlock {
message: empty_block,
signature: Signature::empty(),
};
let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty());
let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
// keep count of the number of messages received
@ -257,10 +254,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
// BlocksByRange Response
let spec = E::default_spec();
let empty_block = BeaconBlock::empty(&spec);
let empty_signed = SignedBeaconBlock {
message: empty_block,
signature: Signature::empty(),
};
let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty());
let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
// keep count of the number of messages received
@ -390,10 +384,7 @@ fn test_blocks_by_range_single_empty_rpc() {
// BlocksByRange Response
let spec = E::default_spec();
let empty_block = BeaconBlock::empty(&spec);
let empty_signed = SignedBeaconBlock {
message: empty_block,
signature: Signature::empty(),
};
let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty());
let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
let messages_to_send = 1;
@ -510,10 +501,7 @@ fn test_blocks_by_root_chunked_rpc() {
// BlocksByRoot Response
let full_block = BeaconBlock::full(&spec);
let signed_full_block = SignedBeaconBlock {
message: full_block,
signature: Signature::empty(),
};
let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block)));
// keep count of the number of messages received
@ -634,10 +622,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
// BlocksByRoot Response
let full_block = BeaconBlock::full(&spec);
let signed_full_block = SignedBeaconBlock {
message: full_block,
signature: Signature::empty(),
};
let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block)));
// keep count of the number of messages received

View File

@ -5,7 +5,7 @@ use eth1::{DepositLog, Eth1Block, Service as Eth1Service};
use slog::{debug, error, info, trace, Logger};
use state_processing::{
eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state,
per_block_processing::process_deposit, process_activations,
per_block_processing::process_operations::process_deposit, process_activations,
};
use std::sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
@ -190,7 +190,7 @@ impl Eth1GenesisService {
.get_active_validator_indices(E::genesis_epoch(), &spec)
.map_err(|e| format!("Genesis validators error: {:?}", e))?
.len(),
"genesis_time" => genesis_state.genesis_time,
"genesis_time" => genesis_state.genesis_time(),
);
break Ok(genesis_state);
}

View File

@ -48,10 +48,12 @@ pub fn interop_genesis_state<T: EthSpec>(
)
.map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?;
state.genesis_time = genesis_time;
*state.genesis_time_mut() = genesis_time;
// Invalid all the caches after all the manual state surgery.
state.drop_all_caches();
// Invalidate all the caches after all the manual state surgery.
state
.drop_all_caches()
.map_err(|e| format!("Unable to drop caches: {:?}", e))?;
Ok(state)
}
@ -75,24 +77,25 @@ mod test {
.expect("should build state");
assert_eq!(
state.eth1_data.block_hash,
state.eth1_data().block_hash,
Hash256::from_slice(&[0x42; 32]),
"eth1 block hash should be co-ordinated junk"
);
assert_eq!(
state.genesis_time, genesis_time,
state.genesis_time(),
genesis_time,
"genesis time should be as specified"
);
for b in &state.balances {
for b in state.balances() {
assert_eq!(
*b, spec.max_effective_balance,
"validator balances should be max effective balance"
);
}
for v in &state.validators {
for v in state.validators() {
let creds = v.withdrawal_credentials.as_bytes();
assert_eq!(
creds[0], spec.bls_withdrawal_prefix_byte,
@ -106,13 +109,13 @@ mod test {
}
assert_eq!(
state.balances.len(),
state.balances().len(),
validator_count,
"validator balances len should be correct"
);
assert_eq!(
state.validators.len(),
state.validators().len(),
validator_count,
"validator count should be correct"
);

View File

@ -92,12 +92,12 @@ fn basic() {
// Note: using ganache these deposits are 1-per-block, therefore we know there should only be
// the minimum number of validators.
assert_eq!(
state.validators.len(),
state.validators().len(),
spec.min_genesis_active_validator_count as usize,
"should have expected validator count"
);
assert!(state.genesis_time > 0, "should have some genesis time");
assert!(state.genesis_time() > 0, "should have some genesis time");
assert!(
is_valid_genesis_state(&state, &spec),

View File

@ -37,8 +37,9 @@ use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
use tokio_stream::{wrappers::BroadcastStream, StreamExt};
use types::{
Attestation, AttesterSlashing, CommitteeCache, Epoch, EthSpec, ProposerSlashing, RelativeEpoch,
SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
Attestation, AttesterSlashing, CommitteeCache, ConfigAndPreset, Epoch, EthSpec,
ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock,
SignedVoluntaryExit, Slot,
};
use warp::http::StatusCode;
use warp::sse::Event;
@ -75,6 +76,7 @@ pub struct Config {
pub listen_addr: Ipv4Addr,
pub listen_port: u16,
pub allow_origin: Option<String>,
pub serve_legacy_spec: bool,
}
impl Default for Config {
@ -84,6 +86,7 @@ impl Default for Config {
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
listen_port: 5052,
allow_origin: None,
serve_legacy_spec: true,
}
}
}
@ -332,7 +335,8 @@ pub fn serve<T: BeaconChainTypes>(
.untuple_one();
// Create a `warp` filter that provides access to the logger.
let log_filter = warp::any().map(move || ctx.log.clone());
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
/*
*
@ -407,9 +411,9 @@ pub fn serve<T: BeaconChainTypes>(
state_id
.map_state(&chain, |state| {
Ok(api_types::FinalityCheckpointsData {
previous_justified: state.previous_justified_checkpoint,
current_justified: state.current_justified_checkpoint,
finalized: state.finalized_checkpoint,
previous_justified: state.previous_justified_checkpoint(),
current_justified: state.current_justified_checkpoint(),
finalized: state.finalized_checkpoint(),
})
})
.map(api_types::GenericResponse::from)
@ -430,9 +434,9 @@ pub fn serve<T: BeaconChainTypes>(
state_id
.map_state(&chain, |state| {
Ok(state
.validators
.validators()
.iter()
.zip(state.balances.iter())
.zip(state.balances().iter())
.enumerate()
// filter by validator id(s) if provided
.filter(|(index, (validator, _))| {
@ -475,9 +479,9 @@ pub fn serve<T: BeaconChainTypes>(
let far_future_epoch = chain.spec.far_future_epoch;
Ok(state
.validators
.validators()
.iter()
.zip(state.balances.iter())
.zip(state.balances().iter())
.enumerate()
// filter by validator id(s) if provided
.filter(|(index, (validator, _))| {
@ -541,15 +545,15 @@ pub fn serve<T: BeaconChainTypes>(
.map_state(&chain, |state| {
let index_opt = match &validator_id {
ValidatorId::PublicKey(pubkey) => {
state.validators.iter().position(|v| v.pubkey == *pubkey)
state.validators().iter().position(|v| v.pubkey == *pubkey)
}
ValidatorId::Index(index) => Some(*index as usize),
};
index_opt
.and_then(|index| {
let validator = state.validators.get(index)?;
let balance = *state.balances.get(index)?;
let validator = state.validators().get(index)?;
let balance = *state.balances().get(index)?;
let epoch = state.current_epoch();
let far_future_epoch = chain.spec.far_future_epoch;
@ -591,7 +595,7 @@ pub fn serve<T: BeaconChainTypes>(
blocking_json_task(move || {
query_state_id.map_state(&chain, |state| {
let epoch = state.slot.epoch(T::EthSpec::slots_per_epoch());
let epoch = state.slot().epoch(T::EthSpec::slots_per_epoch());
let committee_cache = if state
.committee_cache_is_initialized(RelativeEpoch::Current)
@ -725,8 +729,8 @@ pub fn serve<T: BeaconChainTypes>(
root,
canonical: true,
header: api_types::BlockHeaderAndSignature {
message: block.message.block_header(),
signature: block.signature.into(),
message: block.message().block_header(),
signature: block.signature().clone().into(),
},
};
@ -760,8 +764,8 @@ pub fn serve<T: BeaconChainTypes>(
root,
canonical,
header: api_types::BlockHeaderAndSignature {
message: block.message.block_header(),
signature: block.signature.into(),
message: block.message().block_header(),
signature: block.signature().clone().into(),
},
};
@ -799,7 +803,7 @@ pub fn serve<T: BeaconChainTypes>(
// Determine the delay after the start of the slot, register it with metrics.
let delay =
get_block_delay_ms(seen_timestamp, &block.message, &chain.slot_clock);
get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
metrics::observe_duration(
&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES,
delay,
@ -817,7 +821,7 @@ pub fn serve<T: BeaconChainTypes>(
// Notify the validator monitor.
chain.validator_monitor.read().register_api_block(
seen_timestamp,
&block.message,
block.message(),
root,
&chain.slot_clock,
);
@ -935,7 +939,8 @@ pub fn serve<T: BeaconChainTypes>(
blocking_json_task(move || {
block_id
.block(&chain)
.map(|block| block.message.body.attestations)
// FIXME(altair): could avoid clone with by-value accessor
.map(|block| block.message().body().attestations().clone())
.map(api_types::GenericResponse::from)
})
});
@ -1266,17 +1271,19 @@ pub fn serve<T: BeaconChainTypes>(
});
// GET config/spec
let serve_legacy_spec = ctx.config.serve_legacy_spec;
let get_config_spec = config_path
.and(warp::path("spec"))
.and(warp::path::end())
.and(chain_filter.clone())
.and_then(|chain: Arc<BeaconChain<T>>| {
.and_then(move |chain: Arc<BeaconChain<T>>| {
blocking_json_task(move || {
Ok(api_types::GenericResponse::from(YamlConfig::from_spec::<
T::EthSpec,
>(
&chain.spec
)))
let mut config_and_preset =
ConfigAndPreset::from_chain_spec::<T::EthSpec>(&chain.spec);
if serve_legacy_spec {
config_and_preset.make_backwards_compat(&chain.spec);
}
Ok(api_types::GenericResponse::from(config_and_preset))
})
});

View File

@ -148,7 +148,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>(
state.current_epoch(),
dependent_root,
indices.clone(),
state.fork,
state.fork(),
)
.map_err(BeaconChainError::from)
.map_err(warp_utils::reject::beacon_chain_error)?;

View File

@ -57,7 +57,7 @@ impl StateId {
&self,
chain: &BeaconChain<T>,
) -> Result<Fork, warp::Rejection> {
self.map_state(chain, |state| Ok(state.fork))
self.map_state(chain, |state| Ok(state.fork()))
}
/// Return the `BeaconState` identified by `self`.

View File

@ -20,7 +20,7 @@ pub fn global_validator_inclusion_data<T: BeaconChainTypes>(
let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
validator_statuses
.process_attestations(&state, &chain.spec)
.process_attestations(&state)
.map_err(warp_utils::reject::beacon_state_error)?;
let totals = validator_statuses.total_balances;
@ -49,7 +49,7 @@ pub fn validator_inclusion_data<T: BeaconChainTypes>(
let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
validator_statuses
.process_attestations(&state, &chain.spec)
.process_attestations(&state)
.map_err(warp_utils::reject::beacon_state_error)?;
state

View File

@ -23,11 +23,9 @@ use sensitive_url::SensitiveUrl;
use slot_clock::SlotClock;
use state_processing::per_slot_processing;
use std::convert::TryInto;
use std::iter::Iterator;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Duration;
use tree_hash::TreeHash;
use types::{
@ -77,6 +75,7 @@ impl ApiTester {
pub fn new() -> Self {
let mut harness = BeaconChainHarness::new(
MainnetEthSpec,
None,
generate_deterministic_keypairs(VALIDATOR_COUNT),
);
@ -189,6 +188,7 @@ impl ApiTester {
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
listen_port: 0,
allow_origin: None,
serve_legacy_spec: true,
},
chain: Some(chain.clone()),
network_tx: Some(network_tx),
@ -235,6 +235,7 @@ impl ApiTester {
pub fn new_from_genesis() -> Self {
let harness = BeaconChainHarness::new(
MainnetEthSpec,
None,
generate_deterministic_keypairs(VALIDATOR_COUNT),
);
@ -301,6 +302,7 @@ impl ApiTester {
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
listen_port: 0,
allow_origin: None,
serve_legacy_spec: true,
},
chain: Some(chain.clone()),
network_tx: Some(network_tx),
@ -445,8 +447,8 @@ impl ApiTester {
let state = self.chain.head().unwrap().beacon_state;
let expected = GenesisData {
genesis_time: state.genesis_time,
genesis_validators_root: state.genesis_validators_root,
genesis_time: state.genesis_time(),
genesis_validators_root: state.genesis_validators_root(),
genesis_fork_version: self.chain.spec.genesis_fork_version,
};
@ -508,7 +510,7 @@ impl ApiTester {
.unwrap()
.map(|res| res.data);
let expected = self.get_state(state_id).map(|state| state.fork);
let expected = self.get_state(state_id).map(|state| state.fork());
assert_eq!(result, expected, "{:?}", state_id);
}
@ -528,9 +530,9 @@ impl ApiTester {
let expected = self
.get_state(state_id)
.map(|state| FinalityCheckpointsData {
previous_justified: state.previous_justified_checkpoint,
current_justified: state.current_justified_checkpoint,
finalized: state.finalized_checkpoint,
previous_justified: state.previous_justified_checkpoint(),
current_justified: state.current_justified_checkpoint(),
finalized: state.finalized_checkpoint(),
});
assert_eq!(result, expected, "{:?}", state_id);
@ -544,7 +546,7 @@ impl ApiTester {
for validator_indices in self.interesting_validator_indices() {
let state_opt = self.get_state(state_id);
let validators: Vec<Validator> = match state_opt.as_ref() {
Some(state) => state.validators.clone().into(),
Some(state) => state.validators().clone().into(),
None => vec![],
};
let validator_index_ids = validator_indices
@ -587,10 +589,10 @@ impl ApiTester {
let mut validators = Vec::with_capacity(validator_indices.len());
for i in validator_indices {
if i < state.balances.len() as u64 {
if i < state.balances().len() as u64 {
validators.push(ValidatorBalanceData {
index: i as u64,
balance: state.balances[i as usize],
balance: state.balances()[i as usize],
});
}
}
@ -612,7 +614,7 @@ impl ApiTester {
for validator_indices in self.interesting_validator_indices() {
let state_opt = self.get_state(state_id);
let validators: Vec<Validator> = match state_opt.as_ref() {
Some(state) => state.validators.clone().into(),
Some(state) => state.validators().clone().into(),
None => vec![],
};
let validator_index_ids = validator_indices
@ -661,10 +663,10 @@ impl ApiTester {
let mut validators = Vec::with_capacity(validator_indices.len());
for i in validator_indices {
if i >= state.validators.len() as u64 {
if i >= state.validators().len() as u64 {
continue;
}
let validator = state.validators[i as usize].clone();
let validator = state.validators()[i as usize].clone();
let status = ValidatorStatus::from_validator(
&validator,
epoch,
@ -676,7 +678,7 @@ impl ApiTester {
{
validators.push(ValidatorData {
index: i as u64,
balance: state.balances[i as usize],
balance: state.balances()[i as usize],
status,
validator,
});
@ -699,7 +701,7 @@ impl ApiTester {
for state_id in self.interesting_state_ids() {
let state_opt = self.get_state(state_id);
let validators = match state_opt.as_ref() {
Some(state) => state.validators.clone().into(),
Some(state) => state.validators().clone().into(),
None => vec![],
};
@ -729,7 +731,7 @@ impl ApiTester {
ValidatorData {
index: i as u64,
balance: state.balances[i],
balance: state.balances()[i],
status: ValidatorStatus::from_validator(
&validator,
epoch,
@ -846,8 +848,8 @@ impl ApiTester {
root,
canonical: true,
header: BlockHeaderAndSignature {
message: block.message.block_header(),
signature: block.signature.into(),
message: block.message().block_header(),
signature: block.signature().clone().into(),
},
};
let expected = vec![header];
@ -927,13 +929,13 @@ impl ApiTester {
assert_eq!(result.root, block_root, "{:?}", block_id);
assert_eq!(
result.header.message,
block.message.block_header(),
block.message().block_header(),
"{:?}",
block_id
);
assert_eq!(
result.header.signature,
block.signature.into(),
block.signature().clone().into(),
"{:?}",
block_id
);
@ -980,7 +982,7 @@ impl ApiTester {
pub async fn test_post_beacon_blocks_invalid(mut self) -> Self {
let mut next_block = self.next_block.clone();
next_block.message.proposer_index += 1;
*next_block.message_mut().proposer_index_mut() += 1;
assert!(self.client.post_beacon_blocks(&next_block).await.is_err());
@ -1012,7 +1014,11 @@ impl ApiTester {
.map(|res| res.data);
assert_eq!(json_result, expected, "{:?}", block_id);
let ssz_result = self.client.get_beacon_blocks_ssz(block_id).await.unwrap();
let ssz_result = self
.client
.get_beacon_blocks_ssz(block_id, &self.chain.spec)
.await
.unwrap();
assert_eq!(ssz_result, expected, "{:?}", block_id);
}
@ -1030,7 +1036,7 @@ impl ApiTester {
let expected = self
.get_block(block_id)
.map(|block| block.message.body.attestations.into());
.map(|block| block.message().body().attestations().clone().into());
if let BlockId::Slot(slot) = block_id {
if expected.is_none() {
@ -1264,7 +1270,8 @@ impl ApiTester {
pub async fn test_get_config_spec(self) -> Self {
let result = self.client.get_config_spec().await.unwrap().data;
let expected = YamlConfig::from_spec::<E>(&self.chain.spec);
let mut expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec);
expected.make_backwards_compat(&self.chain.spec);
assert_eq!(result, expected);
@ -1432,7 +1439,7 @@ impl ApiTester {
for state_id in self.interesting_state_ids() {
let result_ssz = self
.client
.get_debug_beacon_states_ssz(state_id)
.get_debug_beacon_states_ssz(state_id, &self.chain.spec)
.await
.unwrap();
let result_json = self
@ -1471,7 +1478,7 @@ impl ApiTester {
}
fn validator_count(&self) -> usize {
self.chain.head().unwrap().beacon_state.validators.len()
self.chain.head().unwrap().beacon_state.validators().len()
}
fn interesting_validator_indices(&self) -> Vec<Vec<u64>> {
@ -1575,7 +1582,7 @@ impl ApiTester {
let expected_len = indices
.iter()
.filter(|i| **i < state.validators.len() as u64)
.filter(|i| **i < state.validators().len() as u64)
.count();
assert_eq!(result_duties.len(), expected_len);
@ -1586,7 +1593,7 @@ impl ApiTester {
.unwrap()
{
let expected = AttesterData {
pubkey: state.validators[i as usize].pubkey.clone().into(),
pubkey: state.validators()[i as usize].pubkey.clone().into(),
validator_index: i,
committees_at_slot: duty.committees_at_slot,
committee_index: duty.index,
@ -1691,7 +1698,7 @@ impl ApiTester {
let index = state
.get_beacon_proposer_index(slot, &self.chain.spec)
.unwrap();
let pubkey = state.validators[index].pubkey.clone().into();
let pubkey = state.validators()[index].pubkey.clone().into();
ProposerData {
pubkey,
@ -1849,7 +1856,7 @@ impl ApiTester {
pub async fn test_get_validator_attestation_data(self) -> Self {
let mut state = self.chain.head_beacon_state().unwrap();
let slot = state.slot;
let slot = state.slot();
state
.build_committee_cache(RelativeEpoch::Current, &self.chain.spec)
.unwrap();
@ -1879,9 +1886,9 @@ impl ApiTester {
.chain
.head_beacon_block()
.unwrap()
.message
.body
.attestations[0]
.message()
.body()
.attestations()[0]
.clone();
let result = self
@ -1915,7 +1922,7 @@ impl ApiTester {
.unwrap();
let committee_len = head.beacon_state.get_committee_count_at_slot(slot).unwrap();
let fork = head.beacon_state.fork;
let fork = head.beacon_state.fork();
let genesis_validators_root = self.chain.genesis_validators_root;
let duties = self
@ -2118,7 +2125,7 @@ impl ApiTester {
for state_id in self.interesting_state_ids() {
let result = self
.client
.get_lighthouse_beacon_states_ssz(&state_id)
.get_lighthouse_beacon_states_ssz(&state_id, &self.chain.spec)
.await
.unwrap();

View File

@ -43,7 +43,7 @@ lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
task_executor = { path = "../../common/task_executor" }
igd = "0.11.1"
itertools = "0.9.0"
itertools = "0.10.0"
num_cpus = "1.13.0"
lru_cache = { path = "../../common/lru_cache" }
if-addrs = "0.6.4"

View File

@ -3,10 +3,10 @@
use crate::beacon_processor::*;
use crate::{service::NetworkMessage, sync::SyncMessage};
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use discv5::enr::{CombinedKey, EnrBuilder};
use environment::{null_logger, Environment, EnvironmentBuilder};
use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId};
@ -66,6 +66,7 @@ impl TestRig {
pub fn new(chain_length: u64) -> Self {
let mut harness = BeaconChainHarness::new(
MainnetEthSpec,
None,
generate_deterministic_keypairs(VALIDATOR_COUNT),
);

View File

@ -244,7 +244,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// Log metrics to track delay from other nodes on the network.
metrics::observe_duration(
&metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME,
get_block_delay_ms(seen_duration, &block.message, &self.chain.slot_clock),
get_block_delay_ms(seen_duration, block.message(), &self.chain.slot_clock),
);
let verified_block = match self.chain.verify_block_for_gossip(block) {
@ -305,6 +305,7 @@ impl<T: BeaconChainTypes> Worker<T> {
| Err(e @ BlockError::InvalidSignature)
| Err(e @ BlockError::TooManySkippedSlots { .. })
| Err(e @ BlockError::WeakSubjectivityConflict)
| Err(e @ BlockError::InconsistentFork(_))
| Err(e @ BlockError::GenesisBlock) => {
warn!(self.log, "Could not verify block for gossip, rejecting the block";
"error" => %e);
@ -322,7 +323,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// verified.
self.chain.validator_monitor.read().register_gossip_block(
seen_duration,
&verified_block.block.message,
verified_block.block.message(),
verified_block.block_root,
&self.chain.slot_clock,
);

View File

@ -57,8 +57,8 @@ impl<T: BeaconChainTypes> Worker<T> {
match process_id {
// this a request from the range sync
ProcessId::RangeBatchId(chain_id, epoch) => {
let start_slot = downloaded_blocks.first().map(|b| b.message.slot.as_u64());
let end_slot = downloaded_blocks.last().map(|b| b.message.slot.as_u64());
let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
let sent_blocks = downloaded_blocks.len();
let result = match self.process_blocks(downloaded_blocks.iter()) {

View File

@ -277,7 +277,7 @@ fn spawn_service<T: BeaconChainTypes>(
.map(|current_epoch| {
head
.beacon_state
.validators
.validators()
.iter()
.filter(|validator|
validator.is_active_at(current_epoch)

View File

@ -38,6 +38,7 @@ mod tests {
let beacon_chain = Arc::new(
BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
None,
generate_deterministic_keypairs(8),
StoreConfig::default(),
)

View File

@ -331,8 +331,13 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// check if the parent of this block isn't in our failed cache. If it is, this
// chain should be dropped and the peer downscored.
if self.failed_chains.contains(&block.message.parent_root) {
debug!(self.log, "Parent chain ignored due to past failure"; "block" => ?block.message.parent_root, "slot" => block.message.slot);
if self.failed_chains.contains(&block.message().parent_root()) {
debug!(
self.log,
"Parent chain ignored due to past failure";
"block" => ?block.message().parent_root(),
"slot" => block.slot()
);
if !parent_request.downloaded_blocks.is_empty() {
// Add the root block to failed chains
self.failed_chains
@ -490,7 +495,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.head_info()
.map(|info| info.slot)
.unwrap_or_else(|_| Slot::from(0u64));
let unknown_block_slot = block.message.slot;
let unknown_block_slot = block.slot();
// if the block is far in the future, ignore it. If its within the slot tolerance of
// our current head, regardless of the syncing state, fetch it.
@ -505,10 +510,10 @@ impl<T: BeaconChainTypes> SyncManager<T> {
let block_root = block.canonical_root();
// If this block or it's parent is part of a known failed chain, ignore it.
if self.failed_chains.contains(&block.message.parent_root)
if self.failed_chains.contains(&block.message().parent_root())
|| self.failed_chains.contains(&block_root)
{
debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => ?block_root, "block_slot" => block.message.slot);
debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => ?block_root, "block_slot" => block.slot());
return;
}
@ -525,7 +530,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
}
debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => %block.canonical_root());
debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.slot(), "block_hash" => %block.canonical_root());
let parent_request = ParentRequests {
downloaded_blocks: vec![block],

View File

@ -21,3 +21,5 @@ store = { path = "../store" }
[dev-dependencies]
rand = "0.7.3"
lazy_static = "1.4.0"
beacon_chain = { path = "../beacon_chain" }

View File

@ -1,7 +1,13 @@
use crate::max_cover::MaxCover;
use state_processing::common::{get_attesting_indices, get_base_reward};
use state_processing::common::{
altair, base, get_attestation_participation_flag_indices, get_attesting_indices,
};
use std::collections::HashMap;
use types::{Attestation, BeaconState, BitList, ChainSpec, EthSpec};
use types::{
beacon_state::BeaconStateBase,
consts::altair::{PARTICIPATION_FLAG_WEIGHTS, WEIGHT_DENOMINATOR},
Attestation, BeaconState, BitList, ChainSpec, EthSpec,
};
#[derive(Debug, Clone)]
pub struct AttMaxCover<'a, T: EthSpec> {
@ -18,7 +24,22 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
total_active_balance: u64,
spec: &ChainSpec,
) -> Option<Self> {
let fresh_validators = earliest_attestation_validators(att, state);
if let BeaconState::Base(ref base_state) = state {
Self::new_for_base(att, state, base_state, total_active_balance, spec)
} else {
Self::new_for_altair(att, state, total_active_balance, spec)
}
}
/// Initialise an attestation cover object for base/phase0 hard fork.
pub fn new_for_base(
att: &'a Attestation<T>,
state: &BeaconState<T>,
base_state: &BeaconStateBase<T>,
total_active_balance: u64,
spec: &ChainSpec,
) -> Option<Self> {
let fresh_validators = earliest_attestation_validators(att, state, base_state);
let committee = state
.get_beacon_committee(att.data.slot, att.data.index)
.ok()?;
@ -27,10 +48,14 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
.iter()
.map(|i| *i as u64)
.flat_map(|validator_index| {
let reward =
get_base_reward(state, validator_index as usize, total_active_balance, spec)
.ok()?
/ spec.proposer_reward_quotient;
let reward = base::get_base_reward(
state,
validator_index as usize,
total_active_balance,
spec,
)
.ok()?
.checked_div(spec.proposer_reward_quotient)?;
Some((validator_index, reward))
})
.collect();
@ -39,6 +64,62 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
fresh_validators_rewards,
})
}
/// Initialise an attestation cover object for Altair or later.
pub fn new_for_altair(
att: &'a Attestation<T>,
state: &BeaconState<T>,
total_active_balance: u64,
spec: &ChainSpec,
) -> Option<Self> {
let committee = state
.get_beacon_committee(att.data.slot, att.data.index)
.ok()?;
let attesting_indices =
get_attesting_indices::<T>(committee.committee, &att.aggregation_bits).ok()?;
let participation_list = if att.data.target.epoch == state.current_epoch() {
state.current_epoch_participation().ok()?
} else if att.data.target.epoch == state.previous_epoch() {
state.previous_epoch_participation().ok()?
} else {
return None;
};
let inclusion_delay = state.slot().as_u64().checked_sub(att.data.slot.as_u64())?;
let att_participation_flags =
get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec)
.ok()?;
let fresh_validators_rewards = attesting_indices
.iter()
.filter_map(|&index| {
let mut proposer_reward_numerator = 0;
let participation = participation_list.get(index)?;
let base_reward =
altair::get_base_reward(state, index, total_active_balance, spec).ok()?;
for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() {
if att_participation_flags.contains(&flag_index)
&& !participation.has_flag(flag_index).ok()?
{
proposer_reward_numerator += base_reward.checked_mul(*weight)?;
}
}
let proposer_reward = proposer_reward_numerator
.checked_div(WEIGHT_DENOMINATOR.checked_mul(spec.proposer_reward_quotient)?)?;
Some((index as u64, proposer_reward)).filter(|_| proposer_reward != 0)
})
.collect();
Some(Self {
att,
fresh_validators_rewards,
})
}
}
impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
@ -58,6 +139,11 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
/// confusing committees when updating covering sets, we update only those attestations
/// whose slot and index match the attestation being included in the solution, by the logic
/// that a slot and index uniquely identify a committee.
///
/// We completely remove any validator covered by another attestation. This is close to optimal
/// because including two attestations on chain to satisfy different participation bits is
/// impossible without the validator double voting. I.e. it is only suboptimal in the presence
/// of slashable voting, which is rare.
fn update_covering_set(
&mut self,
best_att: &Attestation<T>,
@ -81,19 +167,20 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
/// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations`
/// depending on when it was created, and all those validators who have already attested are
/// removed from the `aggregation_bits` before returning it.
// TODO: This could be optimised with a map from validator index to whether that validator has
// attested in each of the current and previous epochs. Currently quadratic in number of validators.
///
/// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading.
pub fn earliest_attestation_validators<T: EthSpec>(
attestation: &Attestation<T>,
state: &BeaconState<T>,
base_state: &BeaconStateBase<T>,
) -> BitList<T::MaxValidatorsPerCommittee> {
// Bitfield of validators whose attestations are new/fresh.
let mut new_validators = attestation.aggregation_bits.clone();
let state_attestations = if attestation.data.target.epoch == state.current_epoch() {
&state.current_epoch_attestations
&base_state.current_epoch_attestations
} else if attestation.data.target.epoch == state.previous_epoch() {
&state.previous_epoch_attestations
&base_state.previous_epoch_attestations
} else {
return BitList::with_capacity(0).unwrap();
};

View File

@ -1,7 +1,7 @@
use crate::max_cover::MaxCover;
use state_processing::per_block_processing::get_slashable_indices_modular;
use std::collections::{HashMap, HashSet};
use types::{AttesterSlashing, BeaconState, ChainSpec, EthSpec};
use types::{AttesterSlashing, BeaconState, EthSpec};
#[derive(Debug, Clone)]
pub struct AttesterSlashingMaxCover<'a, T: EthSpec> {
@ -14,7 +14,6 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> {
slashing: &'a AttesterSlashing<T>,
proposer_slashing_indices: &HashSet<u64>,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Option<Self> {
let mut effective_balances: HashMap<u64, u64> = HashMap::new();
let epoch = state.current_epoch();
@ -22,21 +21,18 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> {
let slashable_validators =
get_slashable_indices_modular(state, slashing, |index, validator| {
validator.is_slashable_at(epoch) && !proposer_slashing_indices.contains(&index)
});
if let Ok(validators) = slashable_validators {
for vd in &validators {
let eff_balance = state.get_effective_balance(*vd as usize, spec).ok()?;
effective_balances.insert(*vd, eff_balance);
}
Some(Self {
slashing,
effective_balances,
})
} else {
None
.ok()?;
for vd in slashable_validators {
let eff_balance = state.get_effective_balance(vd as usize).ok()?;
effective_balances.insert(vd, eff_balance);
}
Some(Self {
slashing,
effective_balances,
})
}
}

File diff suppressed because it is too large Load Diff

View File

@ -198,6 +198,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
address of this server (e.g., http://localhost:5052).")
.takes_value(true),
)
.arg(
Arg::with_name("http-disable-legacy-spec")
.long("http-disable-legacy-spec")
.help("Disable serving of legacy data on the /config/spec endpoint. May be \
disabled by default in a future release.")
)
/* Prometheus metrics HTTP server related arguments */
.arg(
Arg::with_name("metrics")

View File

@ -107,6 +107,10 @@ pub fn get_config<E: EthSpec>(
client_config.http_api.allow_origin = Some(allow_origin.to_string());
}
if cli_args.is_present("http-disable-legacy-spec") {
client_config.http_api.serve_legacy_spec = false;
}
/*
* Prometheus metrics HTTP server
*/

View File

@ -4,20 +4,15 @@ version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[[bench]]
name = "benches"
harness = false
[dev-dependencies]
tempfile = "3.1.0"
criterion = "0.3.3"
rayon = "1.4.1"
beacon_chain = {path = "../beacon_chain"}
[dependencies]
db-key = "0.0.5"
leveldb = { version = "0.8.6", default-features = false }
parking_lot = "0.11.0"
itertools = "0.9.0"
itertools = "0.10.0"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
tree_hash = "0.1.1"

View File

@ -1,115 +0,0 @@
#![allow(deprecated)]
use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
use rayon::prelude::*;
use ssz::{Decode, Encode};
use std::convert::TryInto;
use store::BeaconStateStorageContainer;
use types::{
test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256,
MainnetEthSpec, Validator,
};
fn get_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
let mut state = BeaconState::new(0, eth1_data, spec);
for i in 0..validator_count {
state.balances.push(i as u64).expect("should add balance");
}
state.validators = (0..validator_count)
.collect::<Vec<_>>()
.par_iter()
.map(|&i| Validator {
pubkey: generate_deterministic_keypair(i).pk.into(),
withdrawal_credentials: Hash256::from_low_u64_le(i as u64),
effective_balance: spec.max_effective_balance,
slashed: false,
activation_eligibility_epoch: Epoch::new(0),
activation_epoch: Epoch::new(0),
exit_epoch: Epoch::from(u64::max_value()),
withdrawable_epoch: Epoch::from(u64::max_value()),
})
.collect::<Vec<_>>()
.into();
state.build_all_caches(spec).expect("should build caches");
state
}
fn all_benches(c: &mut Criterion) {
let validator_count = 16_384;
let state = get_state::<MainnetEthSpec>(validator_count);
let storage_container = BeaconStateStorageContainer::new(&state);
let state_bytes = storage_container.as_ssz_bytes();
let inner_state = state.clone();
c.bench(
&format!("{}_validators", validator_count),
Benchmark::new("encode/beacon_state", move |b| {
b.iter_batched_ref(
|| inner_state.clone(),
|state| black_box(BeaconStateStorageContainer::new(state).as_ssz_bytes()),
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let inner_state = state.clone();
c.bench(
&format!("{}_validators", validator_count),
Benchmark::new("encode/beacon_state/tree_hash_cache", move |b| {
b.iter_batched_ref(
|| inner_state.tree_hash_cache.clone(),
|tree_hash_cache| black_box(tree_hash_cache.as_ssz_bytes()),
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let inner_state = state;
c.bench(
&format!("{}_validators", validator_count),
Benchmark::new("encode/beacon_state/committee_cache[0]", move |b| {
b.iter_batched_ref(
|| inner_state.committee_caches[0].clone(),
|committee_cache| black_box(committee_cache.as_ssz_bytes()),
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
c.bench(
&format!("{}_validators", validator_count),
Benchmark::new("decode/beacon_state", move |b| {
b.iter_batched_ref(
|| state_bytes.clone(),
|bytes| {
let state: BeaconState<MainnetEthSpec> =
BeaconStateStorageContainer::from_ssz_bytes(&bytes)
.expect("should decode")
.try_into()
.expect("should convert into state");
black_box(state)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
}
criterion_group!(benches, all_benches,);
criterion_main!(benches);

View File

@ -1,62 +0,0 @@
//! These examples only really exist so we can use them for flamegraph. If they get annoying to
//! maintain, feel free to delete.
use rayon::prelude::*;
use ssz::{Decode, Encode};
use std::convert::TryInto;
use store::BeaconStateStorageContainer;
use types::{
test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256,
MainnetEthSpec, Validator,
};
type E = MainnetEthSpec;
/// Build a `BeaconState` containing `validator_count` deterministic validators,
/// with all internal caches pre-built. Only used for profiling/flamegraphs.
fn get_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
    let spec = &E::default_spec();

    // A zeroed Eth1 summary is fine: the profiling loop never reads eth1 data.
    let empty_eth1 = Eth1Data {
        deposit_root: Hash256::zero(),
        deposit_count: 0,
        block_hash: Hash256::zero(),
    };

    let mut state = BeaconState::new(0, empty_eth1, spec);

    // Give every validator a (meaningless) balance equal to its own index.
    for index in 0..validator_count {
        state
            .balances
            .push(index as u64)
            .expect("should add balance");
    }

    // Deterministic keypair generation dominates the cost, so fan it out
    // across threads with rayon before collecting into the registry list.
    let indices: Vec<usize> = (0..validator_count).collect();
    state.validators = indices
        .par_iter()
        .map(|&index| Validator {
            pubkey: generate_deterministic_keypair(index).pk.into(),
            withdrawal_credentials: Hash256::from_low_u64_le(index as u64),
            effective_balance: spec.max_effective_balance,
            slashed: false,
            activation_eligibility_epoch: Epoch::new(0),
            activation_epoch: Epoch::new(0),
            exit_epoch: Epoch::from(u64::max_value()),
            withdrawable_epoch: Epoch::from(u64::max_value()),
        })
        .collect::<Vec<_>>()
        .into();

    state.build_all_caches(spec).expect("should build caches");
    state
}
/// Repeatedly round-trip a `BeaconState` through its SSZ storage container so
/// the encode/decode paths dominate a flamegraph profile.
fn main() {
    const VALIDATOR_COUNT: usize = 1_024;
    const ROUND_TRIPS: usize = 1024;

    let state = get_state::<E>(VALIDATOR_COUNT);
    let container = BeaconStateStorageContainer::new(&state);

    for _ in 0..ROUND_TRIPS {
        // Encode, then decode and convert back, discarding the result.
        let bytes = container.as_ssz_bytes();
        let decoded: BeaconState<E> = BeaconStateStorageContainer::from_ssz_bytes(&bytes)
            .expect("should decode")
            .try_into()
            .expect("should convert into state");
        drop(decoded);
    }
}

View File

@ -226,12 +226,12 @@ pub trait Field<E: EthSpec>: Copy {
/// Extract the genesis value for a fixed length field from an
///
/// Will only return a correct value if `slot_needs_genesis_value(state.slot, spec) == true`.
/// Will only return a correct value if `slot_needs_genesis_value(state.slot(), spec) == true`.
fn extract_genesis_value(
state: &BeaconState<E>,
spec: &ChainSpec,
) -> Result<Self::Value, Error> {
let (_, end_vindex) = Self::start_and_end_vindex(state.slot, spec);
let (_, end_vindex) = Self::start_and_end_vindex(state.slot(), spec);
match Self::update_pattern(spec) {
// Genesis value is guaranteed to exist at `end_vindex`, as it won't yet have been
// updated
@ -295,7 +295,7 @@ field!(
T::SlotsPerHistoricalRoot,
DBColumn::BeaconBlockRoots,
|_| OncePerNSlots { n: 1 },
|state: &BeaconState<_>, index, _| safe_modulo_index(&state.block_roots, index)
|state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index)
);
field!(
@ -305,7 +305,7 @@ field!(
T::SlotsPerHistoricalRoot,
DBColumn::BeaconStateRoots,
|_| OncePerNSlots { n: 1 },
|state: &BeaconState<_>, index, _| safe_modulo_index(&state.state_roots, index)
|state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index)
);
field!(
@ -317,7 +317,7 @@ field!(
|_| OncePerNSlots {
n: T::SlotsPerHistoricalRoot::to_u64()
},
|state: &BeaconState<_>, index, _| safe_modulo_index(&state.historical_roots, index)
|state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index)
);
field!(
@ -327,7 +327,7 @@ field!(
T::EpochsPerHistoricalVector,
DBColumn::BeaconRandaoMixes,
|_| OncePerEpoch { lag: 1 },
|state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index)
|state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index)
);
pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
@ -338,12 +338,12 @@ pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
let chunk_size = F::chunk_size();
let (start_vindex, end_vindex) = F::start_and_end_vindex(state.slot, spec);
let (start_vindex, end_vindex) = F::start_and_end_vindex(state.slot(), spec);
let start_cindex = start_vindex / chunk_size;
let end_cindex = end_vindex / chunk_size;
// Store the genesis value if we have access to it, and it hasn't been stored already.
if F::slot_needs_genesis_value(state.slot, spec) {
if F::slot_needs_genesis_value(state.slot(), spec) {
let genesis_value = F::extract_genesis_value(state, spec)?;
F::check_and_store_genesis_value(store, genesis_value, ops)?;
}

View File

@ -71,7 +71,7 @@ impl SimpleForwardsBlockRootsIterator {
) -> Result<Self> {
// Iterate backwards from the end state, stopping at the start slot.
let values = process_results(
std::iter::once(Ok((end_block_root, end_state.slot)))
std::iter::once(Ok((end_block_root, end_state.slot())))
.chain(BlockRootsIterator::owned(store, end_state)),
|iter| {
iter.take_while(|(_, slot)| *slot >= start_slot)
@ -237,7 +237,7 @@ impl SimpleForwardsStateRootsIterator {
) -> Result<Self> {
// Iterate backwards from the end state, stopping at the start slot.
let values = process_results(
std::iter::once(Ok((end_state_root, end_state.slot)))
std::iter::once(Ok((end_state_root, end_state.slot())))
.chain(StateRootsIterator::owned(store, end_state)),
|iter| {
iter.take_while(|(_, slot)| *slot >= start_slot)

View File

@ -3,7 +3,10 @@ use crate::chunked_vector::{
};
use crate::config::{OnDiskStoreConfig, StoreConfig};
use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator};
use crate::impls::beacon_state::{get_full_state, store_full_state};
use crate::impls::{
beacon_block_as_kv_store_op,
beacon_state::{get_full_state, store_full_state},
};
use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
use crate::leveldb_store::BytesKey;
use crate::leveldb_store::LevelDB;
@ -240,7 +243,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
block: SignedBeaconBlock<E>,
) -> Result<(), Error> {
// Store on disk.
self.hot_db.put(block_root, &block)?;
self.hot_db
.do_atomically(vec![beacon_block_as_kv_store_op(block_root, &block)])?;
// Update cache.
self.block_cache.lock().put(*block_root, block);
@ -259,20 +263,34 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
// Fetch from database.
match self.hot_db.get::<SignedBeaconBlock<E>>(block_root)? {
Some(block) => {
match self
.hot_db
.get_bytes(DBColumn::BeaconBlock.into(), block_root.as_bytes())?
{
Some(block_bytes) => {
// Deserialize.
let block = SignedBeaconBlock::from_ssz_bytes(&block_bytes, &self.spec)?;
// Add to cache.
self.block_cache.lock().put(*block_root, block.clone());
Ok(Some(block))
}
None => Ok(None),
}
}
/// Determine whether a block exists in the database.
///
/// Performs a key-only existence check against the hot DB's `BeaconBlock`
/// column; the block body is not read and the in-memory block cache is not
/// consulted.
pub fn block_exists(&self, block_root: &Hash256) -> Result<bool, Error> {
    self.hot_db
        .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes())
}
/// Delete a block from the store and the block cache.
pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
self.block_cache.lock().pop(block_root);
self.hot_db.delete::<SignedBeaconBlock<E>>(block_root)
self.hot_db
.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())
}
pub fn put_state_summary(
@ -286,7 +304,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Store a state in the store.
pub fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
let mut ops: Vec<KeyValueStoreOp> = Vec::new();
if state.slot < self.get_split_slot() {
if state.slot() < self.get_split_slot() {
self.store_cold_state(state_root, &state, &mut ops)?;
self.cold_db.do_atomically(ops)
} else {
@ -456,7 +474,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
for op in batch {
match op {
StoreOp::PutBlock(block_root, block) => {
key_value_batch.push(block.as_kv_store_op(*block_root));
key_value_batch.push(beacon_block_as_kv_store_op(block_root, block));
}
StoreOp::PutState(state_root, state) => {
@ -538,11 +556,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
// On the epoch boundary, store the full state.
if state.slot % E::slots_per_epoch() == 0 {
if state.slot() % E::slots_per_epoch() == 0 {
trace!(
self.log,
"Storing full state on epoch boundary";
"slot" => state.slot.as_u64(),
"slot" => state.slot().as_u64(),
"state_root" => format!("{:?}", state_root)
);
store_full_state(state_root, &state, ops)?;
@ -580,9 +598,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
epoch_boundary_state_root,
}) = self.load_hot_state_summary(state_root)?
{
let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?.ok_or(
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root),
)?;
let boundary_state =
get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or(
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root),
)?;
// Optimization to avoid even *thinking* about replaying blocks if we're already
// on an epoch boundary.
@ -590,7 +609,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
boundary_state
} else {
let blocks =
self.load_blocks_to_replay(boundary_state.slot, slot, latest_block_root)?;
self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?;
self.replay_blocks(boundary_state, blocks, slot, block_replay)?
};
@ -610,11 +629,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state: &BeaconState<E>,
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
if state.slot % self.config.slots_per_restore_point != 0 {
if state.slot() % self.config.slots_per_restore_point != 0 {
warn!(
self.log,
"Not storing non-restore point state in freezer";
"slot" => state.slot.as_u64(),
"slot" => state.slot().as_u64(),
"state_root" => format!("{:?}", state_root)
);
return Ok(());
@ -623,7 +642,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
trace!(
self.log,
"Creating restore point";
"slot" => state.slot,
"slot" => state.slot(),
"state_root" => format!("{:?}", state_root)
);
@ -640,7 +659,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?;
// 3. Store restore point.
let restore_point_index = state.slot.as_u64() / self.config.slots_per_restore_point;
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point;
self.store_restore_point_hash(restore_point_index, *state_root, ops);
Ok(())
@ -670,10 +689,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
/// Load a restore point state by its `state_root`.
fn load_restore_point(&self, state_root: &Hash256) -> Result<BeaconState<E>, Error> {
let mut partial_state: PartialBeaconState<E> = self
let partial_state_bytes = self
.cold_db
.get(state_root)?
.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())?
.ok_or_else(|| HotColdDBError::MissingRestorePoint(*state_root))?;
let mut partial_state: PartialBeaconState<E> =
PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?;
// Fill in the fields of the partial state.
partial_state.load_block_roots(&self.cold_db, &self.spec)?;
@ -717,7 +738,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
// 2. Load the blocks from the high restore point back to the low restore point.
let blocks = self.load_blocks_to_replay(
low_restore_point.slot,
low_restore_point.slot(),
slot,
self.get_high_restore_point_block_root(&high_restore_point, slot)?,
)?;
@ -759,14 +780,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.filter(|result| {
result
.as_ref()
.map_or(true, |block| block.message.slot <= end_slot)
.map_or(true, |block| block.slot() <= end_slot)
})
// Include the block at the start slot (if any). Whilst it doesn't need to be applied
// to the state, it contains a potentially useful state root.
.take_while(|result| {
result
.as_ref()
.map_or(true, |block| block.message.slot >= start_slot)
.map_or(true, |block| block.slot() >= start_slot)
})
.collect::<Result<_, _>>()?;
blocks.reverse();
@ -786,18 +807,36 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
) -> Result<BeaconState<E>, Error> {
if block_replay == BlockReplay::InconsistentStateRoots {
for i in 0..blocks.len() {
blocks[i].message.state_root = Hash256::zero();
let prev_block_root = if i > 0 {
blocks[i - 1].canonical_root()
} else {
// Not read.
Hash256::zero()
};
let (state_root, parent_root) = match &mut blocks[i] {
SignedBeaconBlock::Base(block) => (
&mut block.message.state_root,
&mut block.message.parent_root,
),
SignedBeaconBlock::Altair(block) => (
&mut block.message.state_root,
&mut block.message.parent_root,
),
};
*state_root = Hash256::zero();
if i > 0 {
blocks[i].message.parent_root = blocks[i - 1].canonical_root()
*parent_root = prev_block_root;
}
}
}
let state_root_from_prev_block = |i: usize, state: &BeaconState<E>| {
if i > 0 {
let prev_block = &blocks[i - 1].message;
if prev_block.slot == state.slot {
Some(prev_block.state_root)
let prev_block = blocks[i - 1].message();
if prev_block.slot() == state.slot() {
Some(prev_block.state_root())
} else {
None
}
@ -807,11 +846,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
};
for (i, block) in blocks.iter().enumerate() {
if block.message.slot <= state.slot {
if block.slot() <= state.slot() {
continue;
}
while state.slot < block.message.slot {
while state.slot() < block.slot() {
let state_root = match block_replay {
BlockReplay::Accurate => state_root_from_prev_block(i, &state),
BlockReplay::InconsistentStateRoots => Some(Hash256::zero()),
@ -830,7 +869,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
.map_err(HotColdDBError::BlockReplayBlockError)?;
}
while state.slot < target_slot {
while state.slot() < target_slot {
let state_root = match block_replay {
BlockReplay::Accurate => state_root_from_prev_block(blocks.len(), &state),
BlockReplay::InconsistentStateRoots => Some(Hash256::zero()),
@ -1011,7 +1050,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
debug!(
store.log,
"Freezer migration started";
"slot" => frozen_head.slot
"slot" => frozen_head.slot()
);
// 0. Check that the migration is sensible.
@ -1019,16 +1058,16 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// boundary (in order for the hot state summary scheme to work).
let current_split_slot = store.split.read().slot;
if frozen_head.slot < current_split_slot {
if frozen_head.slot() < current_split_slot {
return Err(HotColdDBError::FreezeSlotError {
current_split_slot,
proposed_split_slot: frozen_head.slot,
proposed_split_slot: frozen_head.slot(),
}
.into());
}
if frozen_head.slot % E::slots_per_epoch() != 0 {
return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into());
if frozen_head.slot() % E::slots_per_epoch() != 0 {
return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into());
}
let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new();
@ -1045,7 +1084,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new();
if slot % store.config.slots_per_restore_point == 0 {
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root)?
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root, &store.spec)?
.ok_or(HotColdDBError::MissingStateToFreeze(state_root))?;
store.store_cold_state(&state_root, &state, &mut cold_db_ops)?;
@ -1102,7 +1141,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
// Before updating the in-memory split value, we flush it to disk first, so that should the
// OS process die at this point, we pick up from the right place after a restart.
let split = Split {
slot: frozen_head.slot,
slot: frozen_head.slot(),
state_root: frozen_head_root,
};
store.hot_db.put_sync(&SPLIT_KEY, &split)?;
@ -1119,7 +1158,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
debug!(
store.log,
"Freezer migration complete";
"slot" => frozen_head.slot
"slot" => frozen_head.slot()
);
Ok(())
@ -1176,8 +1215,8 @@ impl HotStateSummary {
// Fill in the state root on the latest block header if necessary (this happens on all
// slots where there isn't a skip).
let latest_block_root = state.get_latest_block_root(*state_root);
let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch();
let epoch_boundary_state_root = if epoch_boundary_slot == state.slot {
let epoch_boundary_slot = state.slot() / E::slots_per_epoch() * E::slots_per_epoch();
let epoch_boundary_state_root = if epoch_boundary_slot == state.slot() {
*state_root
} else {
*state
@ -1186,7 +1225,7 @@ impl HotStateSummary {
};
Ok(HotStateSummary {
slot: state.slot,
slot: state.slot(),
latest_block_root,
epoch_boundary_state_root,
})

View File

@ -1,35 +1,15 @@
use crate::*;
use ssz::{Decode, Encode};
use ssz::Encode;
pub mod beacon_state;
pub mod partial_beacon_state;
impl<T: EthSpec> StoreItem for SignedBeaconBlock<T> {
fn db_column() -> DBColumn {
DBColumn::BeaconBlock
}
fn as_store_bytes(&self) -> Vec<u8> {
let timer = metrics::start_timer(&metrics::BEACON_BLOCK_WRITE_TIMES);
let bytes = self.as_ssz_bytes();
metrics::stop_timer(timer);
metrics::inc_counter(&metrics::BEACON_BLOCK_WRITE_COUNT);
metrics::inc_counter_by(&metrics::BEACON_BLOCK_WRITE_BYTES, bytes.len() as u64);
bytes
}
fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
let timer = metrics::start_timer(&metrics::BEACON_BLOCK_READ_TIMES);
let len = bytes.len();
let result = Self::from_ssz_bytes(bytes).map_err(Into::into);
metrics::stop_timer(timer);
metrics::inc_counter(&metrics::BEACON_BLOCK_READ_COUNT);
metrics::inc_counter_by(&metrics::BEACON_BLOCK_READ_BYTES, len as u64);
result
}
/// Prepare a signed beacon block for storage in the database.
///
/// Returns a `PutKeyValue` operation targeting the `BeaconBlock` column,
/// keyed by `key` and carrying the block's SSZ encoding.
#[must_use]
pub fn beacon_block_as_kv_store_op<T: EthSpec>(
    key: &Hash256,
    block: &SignedBeaconBlock<T>,
) -> KeyValueStoreOp {
    // FIXME(altair): re-add block write/overhead metrics, or remove them
    let encoded_block = block.as_ssz_bytes();
    let column_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes());
    KeyValueStoreOp::PutKeyValue(column_key, encoded_block)
}

View File

@ -1,6 +1,6 @@
use crate::*;
use ssz::{Decode, DecodeError, Encode};
use ssz_derive::{Decode, Encode};
use ssz::{DecodeError, Encode};
use ssz_derive::Encode;
use std::convert::TryInto;
use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS};
@ -23,13 +23,14 @@ pub fn store_full_state<E: EthSpec>(
pub fn get_full_state<KV: KeyValueStore<E>, E: EthSpec>(
db: &KV,
state_root: &Hash256,
spec: &ChainSpec,
) -> Result<Option<BeaconState<E>>, Error> {
let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES);
match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? {
Some(bytes) => {
let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES);
let container = StorageContainer::from_ssz_bytes(&bytes)?;
let container = StorageContainer::from_ssz_bytes(&bytes, spec)?;
metrics::stop_timer(overhead_timer);
metrics::stop_timer(total_timer);
@ -44,7 +45,7 @@ pub fn get_full_state<KV: KeyValueStore<E>, E: EthSpec>(
/// A container for storing `BeaconState` components.
// TODO: would be more space efficient with the caches stored separately and referenced by hash
#[derive(Encode, Decode)]
#[derive(Encode)]
pub struct StorageContainer<T: EthSpec> {
state: BeaconState<T>,
committee_caches: Vec<CommitteeCache>,
@ -55,9 +56,28 @@ impl<T: EthSpec> StorageContainer<T> {
pub fn new(state: &BeaconState<T>) -> Self {
Self {
state: state.clone_with(CloneConfig::none()),
committee_caches: state.committee_caches.to_vec(),
committee_caches: state.committee_caches().to_vec(),
}
}
/// Decode a `StorageContainer` from SSZ bytes.
///
/// `spec` is required because the inner `BeaconState` is decoded with its
/// slot-switching constructor (fork-dependent variant selection), which the
/// derived `Decode` machinery cannot express.
pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
    // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't
    // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here.
    let mut builder = ssz::SszDecoderBuilder::new(bytes);
    // Field 0: the state itself — registered anonymously so it can be decoded
    // manually below with the spec-aware constructor.
    builder.register_anonymous_variable_length_item()?;
    // Field 1: the committee caches, decoded via the ordinary derived impl.
    builder.register_type::<Vec<CommitteeCache>>()?;
    let mut decoder = builder.build()?;
    let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?;
    let committee_caches = decoder.decode_next()?;
    Ok(Self {
        state,
        committee_caches,
    })
}
}
impl<T: EthSpec> TryInto<BeaconState<T>> for StorageContainer<T> {
@ -73,7 +93,7 @@ impl<T: EthSpec> TryInto<BeaconState<T>> for StorageContainer<T> {
)));
};
state.committee_caches[i] = self.committee_caches.remove(i);
state.committee_caches_mut()[i] = self.committee_caches.remove(i);
}
Ok(state)

View File

@ -1,16 +0,0 @@
use crate::*;
use ssz::{Decode, Encode};
/// Store a `PartialBeaconState` as a database item in the `BeaconState`
/// column, serialised as plain SSZ in both directions.
impl<T: EthSpec> StoreItem for PartialBeaconState<T> {
    fn db_column() -> DBColumn {
        DBColumn::BeaconState
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
        Self::from_ssz_bytes(bytes).map_err(Into::into)
    }
}

View File

@ -27,7 +27,7 @@ impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Option<BlockRootsIterator<'a, E, Hot, Cold>> {
let state = store
.get_state(&self.message.state_root, Some(self.message.slot))
.get_state(&self.message().state_root(), Some(self.slot()))
.ok()??;
Some(BlockRootsIterator::owned(store, state))
@ -161,7 +161,7 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T,
pub fn new(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: &'a BeaconState<T>) -> Self {
Self {
store,
slot: beacon_state.slot,
slot: beacon_state.slot(),
beacon_state: Cow::Borrowed(beacon_state),
}
}
@ -169,7 +169,7 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T,
pub fn owned(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: BeaconState<T>) -> Self {
Self {
store,
slot: beacon_state.slot,
slot: beacon_state.slot(),
beacon_state: Cow::Owned(beacon_state),
}
}
@ -188,7 +188,7 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T,
}
fn do_next(&mut self) -> Result<Option<(Hash256, Hash256, Slot)>, Error> {
if self.slot == 0 || self.slot > self.beacon_state.slot {
if self.slot == 0 || self.slot > self.beacon_state.slot() {
return Ok(None);
}
@ -257,7 +257,7 @@ impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
.store
.get_block(&block_root)?
.ok_or(Error::BlockNotFound(block_root))?;
self.next_block_root = block.message.parent_root;
self.next_block_root = block.message().parent_root();
Ok(Some((block_root, block)))
}
}
@ -323,7 +323,7 @@ fn next_historical_root_backtrack_state<E: EthSpec, Hot: ItemStore<E>, Cold: Ite
// a restore point slot (thus avoiding replaying blocks). In the case where we're
// not frozen, this just means we might not jump back by the maximum amount on
// our first jump (i.e. at most 1 extra state load).
let new_state_slot = slot_of_prev_restore_point::<E>(current_state.slot);
let new_state_slot = slot_of_prev_restore_point::<E>(current_state.slot());
let new_state_root = current_state.get_state_root(new_state_slot)?;
Ok(store
.get_state(new_state_root, Some(new_state_slot))?
@ -339,46 +339,50 @@ fn slot_of_prev_restore_point<E: EthSpec>(current_slot: Slot) -> Slot {
#[cfg(test)]
mod test {
use super::*;
use crate::config::StoreConfig;
use crate::HotColdDB;
use crate::StoreConfig as Config;
use beacon_chain::store::StoreConfig;
use beacon_chain::test_utils::BeaconChainHarness;
use beacon_chain::types::{ChainSpec, Keypair, MainnetEthSpec};
use sloggers::{null::NullLoggerBuilder, Build};
use types::{test_utils::TestingBeaconStateBuilder, ChainSpec, Keypair, MainnetEthSpec};
fn get_state<T: EthSpec>() -> BeaconState<T> {
let builder = TestingBeaconStateBuilder::from_single_keypair(
0,
&Keypair::random(),
&T::default_spec(),
let harness = BeaconChainHarness::new_with_store_config(
T::default(),
None,
vec![Keypair::random()],
StoreConfig::default(),
);
let (state, _keypairs) = builder.build();
state
harness.advance_slot();
harness.get_current_state()
}
#[test]
fn block_root_iter() {
let log = NullLoggerBuilder.build().unwrap();
let store = Arc::new(
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(),
HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(),
);
let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();
let mut state_a: BeaconState<MainnetEthSpec> = get_state();
let mut state_b: BeaconState<MainnetEthSpec> = get_state();
state_a.slot = Slot::from(slots_per_historical_root);
state_b.slot = Slot::from(slots_per_historical_root * 2);
*state_a.slot_mut() = Slot::from(slots_per_historical_root);
*state_b.slot_mut() = Slot::from(slots_per_historical_root * 2);
let mut hashes = (0..).map(Hash256::from_low_u64_be);
for root in &mut state_a.block_roots[..] {
*root = hashes.next().unwrap()
let roots_a = state_a.block_roots_mut();
for i in 0..roots_a.len() {
roots_a[i] = hashes.next().unwrap()
}
for root in &mut state_b.block_roots[..] {
*root = hashes.next().unwrap()
let roots_b = state_b.block_roots_mut();
for i in 0..roots_b.len() {
roots_b[i] = hashes.next().unwrap()
}
let state_a_root = hashes.next().unwrap();
state_b.state_roots[0] = state_a_root;
state_b.state_roots_mut()[0] = state_a_root;
store.put_state(&state_a_root, &state_a).unwrap();
let iter = BlockRootsIterator::new(store, &state_b);
@ -405,15 +409,15 @@ mod test {
fn state_root_iter() {
let log = NullLoggerBuilder.build().unwrap();
let store = Arc::new(
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(),
HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(),
);
let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();
let mut state_a: BeaconState<MainnetEthSpec> = get_state();
let mut state_b: BeaconState<MainnetEthSpec> = get_state();
state_a.slot = Slot::from(slots_per_historical_root);
state_b.slot = Slot::from(slots_per_historical_root * 2);
*state_a.slot_mut() = Slot::from(slots_per_historical_root);
*state_b.slot_mut() = Slot::from(slots_per_historical_root * 2);
let mut hashes = (0..).map(Hash256::from_low_u64_be);

View File

@ -2,17 +2,21 @@ use crate::chunked_vector::{
load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
StateRoots,
};
use crate::{Error, KeyValueStore};
use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp};
use ssz::{Decode, DecodeError, Encode};
use ssz_derive::{Decode, Encode};
use std::convert::TryInto;
use types::superstruct;
use types::*;
/// Lightweight variant of the `BeaconState` that is stored in the database.
///
/// Utilises lazy-loading from separate storage for its vector fields.
///
/// Spec v0.12.1
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
#[superstruct(
variants(Base, Altair),
variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode))
)]
#[derive(Debug, PartialEq, Clone, Encode)]
pub struct PartialBeaconState<T>
where
T: EthSpec,
@ -20,6 +24,7 @@ where
// Versioning
pub genesis_time: u64,
pub genesis_validators_root: Hash256,
#[superstruct(getter(copy))]
pub slot: Slot,
pub fork: Fork,
@ -56,71 +61,152 @@ where
// Slashings
slashings: FixedVector<u64, T::EpochsPerSlashingsVector>,
// Attestations
// Attestations (genesis fork only)
#[superstruct(only(Base))]
pub previous_epoch_attestations: VariableList<PendingAttestation<T>, T::MaxPendingAttestations>,
#[superstruct(only(Base))]
pub current_epoch_attestations: VariableList<PendingAttestation<T>, T::MaxPendingAttestations>,
// Participation (Altair and later)
#[superstruct(only(Altair))]
pub previous_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
#[superstruct(only(Altair))]
pub current_epoch_participation: VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
// Finality
pub justification_bits: BitVector<T::JustificationBitsLength>,
pub previous_justified_checkpoint: Checkpoint,
pub current_justified_checkpoint: Checkpoint,
pub finalized_checkpoint: Checkpoint,
// Inactivity
#[superstruct(only(Altair))]
pub inactivity_scores: VariableList<u64, T::ValidatorRegistryLimit>,
// Light-client sync committees
#[superstruct(only(Altair))]
pub current_sync_committee: SyncCommittee<T>,
#[superstruct(only(Altair))]
pub next_sync_committee: SyncCommittee<T>,
}
impl<T: EthSpec> PartialBeaconState<T> {
/// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields.
pub fn from_state_forgetful(s: &BeaconState<T>) -> Self {
// TODO: could use references/Cow for fields to avoid cloning
PartialBeaconState {
genesis_time: s.genesis_time,
genesis_validators_root: s.genesis_validators_root,
slot: s.slot,
fork: s.fork,
/// Implement the conversion function from BeaconState -> PartialBeaconState.
macro_rules! impl_from_state_forgetful {
($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
PartialBeaconState::$variant_name($struct_name {
// Versioning
genesis_time: $s.genesis_time,
genesis_validators_root: $s.genesis_validators_root,
slot: $s.slot,
fork: $s.fork,
// History
latest_block_header: s.latest_block_header.clone(),
latest_block_header: $s.latest_block_header.clone(),
block_roots: None,
state_roots: None,
historical_roots: None,
// Eth1
eth1_data: s.eth1_data.clone(),
eth1_data_votes: s.eth1_data_votes.clone(),
eth1_deposit_index: s.eth1_deposit_index,
eth1_data: $s.eth1_data.clone(),
eth1_data_votes: $s.eth1_data_votes.clone(),
eth1_deposit_index: $s.eth1_deposit_index,
// Validator registry
validators: s.validators.clone(),
balances: s.balances.clone(),
validators: $s.validators.clone(),
balances: $s.balances.clone(),
// Shuffling
latest_randao_value: *s
.get_randao_mix(s.current_epoch())
latest_randao_value: *$outer
.get_randao_mix($outer.current_epoch())
.expect("randao at current epoch is OK"),
randao_mixes: None,
// Slashings
slashings: s.get_all_slashings().to_vec().into(),
// Attestations
previous_epoch_attestations: s.previous_epoch_attestations.clone(),
current_epoch_attestations: s.current_epoch_attestations.clone(),
slashings: $s.slashings.clone(),
// Finality
justification_bits: s.justification_bits.clone(),
previous_justified_checkpoint: s.previous_justified_checkpoint,
current_justified_checkpoint: s.current_justified_checkpoint,
finalized_checkpoint: s.finalized_checkpoint,
justification_bits: $s.justification_bits.clone(),
previous_justified_checkpoint: $s.previous_justified_checkpoint,
current_justified_checkpoint: $s.current_justified_checkpoint,
finalized_checkpoint: $s.finalized_checkpoint,
// Variant-specific fields
$(
$extra_fields: $s.$extra_fields.clone()
),*
})
}
}
impl<T: EthSpec> PartialBeaconState<T> {
/// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields.
///
/// The `impl_from_state_forgetful!` macro clones the fields common to every fork plus the
/// listed variant-specific fields; the lazily-loaded vector fields (block roots, state
/// roots, historical roots, randao mixes) are left as `None` to be filled in later from
/// the chunked-vector store.
pub fn from_state_forgetful(outer: &BeaconState<T>) -> Self {
    match outer {
        // Genesis ("Base") fork: carry over the pending-attestation lists.
        BeaconState::Base(s) => impl_from_state_forgetful!(
            s,
            outer,
            Base,
            PartialBeaconStateBase,
            [previous_epoch_attestations, current_epoch_attestations]
        ),
        // Altair fork: participation flags, sync committees and inactivity scores.
        BeaconState::Altair(s) => impl_from_state_forgetful!(
            s,
            outer,
            Altair,
            PartialBeaconStateAltair,
            [
                previous_epoch_participation,
                current_epoch_participation,
                current_sync_committee,
                next_sync_committee,
                inactivity_scores
            ]
        ),
    }
}
/// SSZ decode.
///
/// Peeks at the `slot` field, which sits at a fixed offset near the start of the
/// encoding, and uses it to pick which fork variant to decode (the variants have
/// different field sets, so a single derived decoder cannot handle both).
pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
    // Slot is after genesis_time (u64) and genesis_validators_root (Hash256).
    let slot_offset = <u64 as Decode>::ssz_fixed_len() + <Hash256 as Decode>::ssz_fixed_len();
    let slot_len = <Slot as Decode>::ssz_fixed_len();
    let slot_bytes = bytes.get(slot_offset..slot_offset + slot_len).ok_or(
        DecodeError::InvalidByteLength {
            len: bytes.len(),
            expected: slot_offset + slot_len,
        },
    )?;
    let slot = Slot::from_ssz_bytes(slot_bytes)?;
    let epoch = slot.epoch(T::slots_per_epoch());
    // No scheduled Altair fork epoch, or a slot before it, means the Base encoding.
    if spec
        .altair_fork_epoch
        .map_or(true, |altair_epoch| epoch < altair_epoch)
    {
        PartialBeaconStateBase::from_ssz_bytes(bytes).map(Self::Base)
    } else {
        PartialBeaconStateAltair::from_ssz_bytes(bytes).map(Self::Altair)
    }
}
/// Prepare the partial state for storage in the KV database.
///
/// Produces a `PutKeyValue` operation for the `BeaconState` column, keyed by
/// `state_root` and carrying this partial state's SSZ encoding.
#[must_use]
pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp {
    // Serialise first, then build the column-qualified key.
    let encoded = self.as_ssz_bytes();
    let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes());
    KeyValueStoreOp::PutKeyValue(key, encoded)
}
pub fn load_block_roots<S: KeyValueStore<T>>(
&mut self,
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.block_roots.is_none() {
self.block_roots = Some(load_vector_from_db::<BlockRoots, T, _>(
store, self.slot, spec,
if self.block_roots().is_none() {
*self.block_roots_mut() = Some(load_vector_from_db::<BlockRoots, T, _>(
store,
self.slot(),
spec,
)?);
}
Ok(())
@ -131,9 +217,11 @@ impl<T: EthSpec> PartialBeaconState<T> {
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.state_roots.is_none() {
self.state_roots = Some(load_vector_from_db::<StateRoots, T, _>(
store, self.slot, spec,
if self.state_roots().is_none() {
*self.state_roots_mut() = Some(load_vector_from_db::<StateRoots, T, _>(
store,
self.slot(),
spec,
)?);
}
Ok(())
@ -144,10 +232,10 @@ impl<T: EthSpec> PartialBeaconState<T> {
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.historical_roots.is_none() {
self.historical_roots = Some(load_variable_list_from_db::<HistoricalRoots, T, _>(
store, self.slot, spec,
)?);
if self.historical_roots().is_none() {
*self.historical_roots_mut() = Some(
load_variable_list_from_db::<HistoricalRoots, T, _>(store, self.slot(), spec)?,
);
}
Ok(())
}
@ -157,72 +245,101 @@ impl<T: EthSpec> PartialBeaconState<T> {
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.randao_mixes.is_none() {
if self.randao_mixes().is_none() {
// Load the per-epoch values from the database
let mut randao_mixes =
load_vector_from_db::<RandaoMixes, T, _>(store, self.slot, spec)?;
load_vector_from_db::<RandaoMixes, T, _>(store, self.slot(), spec)?;
// Patch the value for the current slot into the index for the current epoch
let current_epoch = self.slot.epoch(T::slots_per_epoch());
let current_epoch = self.slot().epoch(T::slots_per_epoch());
let len = randao_mixes.len();
randao_mixes[current_epoch.as_usize() % len] = self.latest_randao_value;
randao_mixes[current_epoch.as_usize() % len] = *self.latest_randao_value();
self.randao_mixes = Some(randao_mixes)
*self.randao_mixes_mut() = Some(randao_mixes)
}
Ok(())
}
}
impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
type Error = Error;
fn try_into(self) -> Result<BeaconState<E>, Error> {
fn unpack<T>(x: Option<T>) -> Result<T, Error> {
x.ok_or(Error::PartialBeaconStateError)
}
Ok(BeaconState {
genesis_time: self.genesis_time,
genesis_validators_root: self.genesis_validators_root,
slot: self.slot,
fork: self.fork,
/// Implement the conversion from PartialBeaconState -> BeaconState.
macro_rules! impl_try_into_beacon_state {
($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
BeaconState::$variant_name($struct_name {
// Versioning
genesis_time: $inner.genesis_time,
genesis_validators_root: $inner.genesis_validators_root,
slot: $inner.slot,
fork: $inner.fork,
// History
latest_block_header: self.latest_block_header,
block_roots: unpack(self.block_roots)?,
state_roots: unpack(self.state_roots)?,
historical_roots: unpack(self.historical_roots)?,
latest_block_header: $inner.latest_block_header,
block_roots: unpack_field($inner.block_roots)?,
state_roots: unpack_field($inner.state_roots)?,
historical_roots: unpack_field($inner.historical_roots)?,
// Eth1
eth1_data: self.eth1_data,
eth1_data_votes: self.eth1_data_votes,
eth1_deposit_index: self.eth1_deposit_index,
eth1_data: $inner.eth1_data,
eth1_data_votes: $inner.eth1_data_votes,
eth1_deposit_index: $inner.eth1_deposit_index,
// Validator registry
validators: self.validators,
balances: self.balances,
validators: $inner.validators,
balances: $inner.balances,
// Shuffling
randao_mixes: unpack(self.randao_mixes)?,
randao_mixes: unpack_field($inner.randao_mixes)?,
// Slashings
slashings: self.slashings,
// Attestations
previous_epoch_attestations: self.previous_epoch_attestations,
current_epoch_attestations: self.current_epoch_attestations,
slashings: $inner.slashings,
// Finality
justification_bits: self.justification_bits,
previous_justified_checkpoint: self.previous_justified_checkpoint,
current_justified_checkpoint: self.current_justified_checkpoint,
finalized_checkpoint: self.finalized_checkpoint,
justification_bits: $inner.justification_bits,
previous_justified_checkpoint: $inner.previous_justified_checkpoint,
current_justified_checkpoint: $inner.current_justified_checkpoint,
finalized_checkpoint: $inner.finalized_checkpoint,
// Caching
committee_caches: <_>::default(),
pubkey_cache: <_>::default(),
exit_cache: <_>::default(),
tree_hash_cache: <_>::default(),
// Variant-specific fields
$(
$extra_fields: $inner.$extra_fields
),*
})
}
}
fn unpack_field<T>(x: Option<T>) -> Result<T, Error> {
x.ok_or(Error::PartialBeaconStateError)
}
impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
type Error = Error;
fn try_into(self) -> Result<BeaconState<E>, Error> {
let state = match self {
PartialBeaconState::Base(inner) => impl_try_into_beacon_state!(
inner,
Base,
BeaconStateBase,
[previous_epoch_attestations, current_epoch_attestations]
),
PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!(
inner,
Altair,
BeaconStateAltair,
[
previous_epoch_participation,
current_epoch_participation,
current_sync_committee,
next_sync_committee,
inactivity_scores
]
),
};
Ok(state)
}
}

View File

@ -53,7 +53,7 @@ fn http_server_genesis_state() {
.expect("client should have beacon chain")
.state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots)
.expect("should find state");
db_state.drop_all_caches();
db_state.drop_all_caches().unwrap();
assert_eq!(
api_state, db_state,

View File

@ -83,12 +83,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
} else {
// build the enr_fork_id and add it to the local_enr if it exists
let enr_fork = {
let spec = eth2_network_config
.yaml_config
.as_ref()
.ok_or("The network directory must contain a spec config")?
.apply_to_chain_spec::<T>(&T::default_spec())
.ok_or("The loaded config is not compatible with the current spec")?;
let spec = eth2_network_config.chain_spec::<T>()?;
if eth2_network_config.beacon_state_is_known() {
let genesis_state = eth2_network_config.beacon_state::<T>()?;
@ -96,7 +91,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> {
slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string());
let enr_fork = spec.enr_fork_id(
types::Slot::from(0u64),
genesis_state.genesis_validators_root,
genesis_state.genesis_validators_root(),
);
Some(enr_fork.as_ssz_bytes())

View File

@ -51,7 +51,6 @@ pub fn run(matches: &ArgMatches<'_>, eth_spec_id: EthSpecId, debug_level: String
if let Err(e) = match eth_spec_id {
EthSpecId::Minimal => main::<types::MinimalEthSpec>(matches, log),
EthSpecId::Mainnet => main::<types::MainnetEthSpec>(matches, log),
EthSpecId::V012Legacy => main::<types::V012LegacyEthSpec>(matches, log),
} {
slog::crit!(slog_scope::logger(), "{}", e);
}

View File

@ -21,7 +21,6 @@ use reqwest::{IntoUrl, Response};
pub use reqwest::{StatusCode, Url};
use sensitive_url::SensitiveUrl;
use serde::{de::DeserializeOwned, Serialize};
use ssz::Decode;
use std::convert::TryFrom;
use std::fmt;
use std::iter::Iterator;
@ -498,6 +497,7 @@ impl BeaconNodeHttpClient {
pub async fn get_beacon_blocks_ssz<T: EthSpec>(
&self,
block_id: BlockId,
spec: &ChainSpec,
) -> Result<Option<SignedBeaconBlock<T>>, Error> {
let mut path = self.eth_path()?;
@ -509,7 +509,7 @@ impl BeaconNodeHttpClient {
self.get_bytes_opt_accept_header(path, Accept::Ssz)
.await?
.map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
.map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz))
.transpose()
}
@ -715,7 +715,7 @@ impl BeaconNodeHttpClient {
}
/// `GET config/spec`
pub async fn get_config_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> {
pub async fn get_config_spec(&self) -> Result<GenericResponse<ConfigAndPreset>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
@ -883,6 +883,7 @@ impl BeaconNodeHttpClient {
pub async fn get_debug_beacon_states_ssz<T: EthSpec>(
&self,
state_id: StateId,
spec: &ChainSpec,
) -> Result<Option<BeaconState<T>>, Error> {
let mut path = self.eth_path()?;
@ -895,7 +896,7 @@ impl BeaconNodeHttpClient {
self.get_bytes_opt_accept_header(path, Accept::Ssz)
.await?
.map(|bytes| BeaconState::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
.map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz))
.transpose()
}

View File

@ -2,13 +2,12 @@
use crate::{
ok_or_error,
types::{BeaconState, Epoch, EthSpec, GenericResponse, ValidatorId},
types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId},
BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode,
};
use proto_array::core::ProtoArray;
use reqwest::IntoUrl;
use serde::{Deserialize, Serialize};
use ssz::Decode;
use ssz_derive::{Decode, Encode};
pub use eth2_libp2p::{types::SyncState, PeerInfo};
@ -470,6 +469,7 @@ impl BeaconNodeHttpClient {
pub async fn get_lighthouse_beacon_states_ssz<E: EthSpec>(
&self,
state_id: &StateId,
spec: &ChainSpec,
) -> Result<Option<BeaconState<E>>, Error> {
let mut path = self.server.full.clone();
@ -483,7 +483,7 @@ impl BeaconNodeHttpClient {
self.get_bytes_opt(path)
.await?
.map(|bytes| BeaconState::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
.map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz))
.transpose()
}

View File

@ -211,7 +211,7 @@ impl ValidatorClientHttpClient {
}
/// `GET lighthouse/spec`
pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> {
pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<ConfigAndPreset>, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()

View File

@ -44,13 +44,6 @@ impl Eth2Config {
spec: ChainSpec::minimal(),
}
}
pub fn v012_legacy() -> Self {
Self {
eth_spec_id: EthSpecId::V012Legacy,
spec: ChainSpec::v012_legacy(),
}
}
}
/// A directory that can be built by downloading files via HTTP.
@ -112,16 +105,8 @@ macro_rules! define_net {
};
}
define_net!(altona, include_altona_file, "altona", true);
define_net!(medalla, include_medalla_file, "medalla", true);
define_net!(spadina, include_spadina_file, "spadina", true);
define_net!(pyrmont, include_pyrmont_file, "pyrmont", true);
define_net!(mainnet, include_mainnet_file, "mainnet", true);
define_net!(toledo, include_toledo_file, "toledo", true);
define_net!(prater, include_prater_file, "prater", true);

View File

@ -1,19 +1,12 @@
//! Extracts zipped genesis states on first run.
use eth2_config::{
altona, mainnet, medalla, prater, pyrmont, spadina, toledo, Eth2NetArchiveAndDirectory,
GENESIS_FILE_NAME,
};
use eth2_config::{mainnet, prater, pyrmont, Eth2NetArchiveAndDirectory, GENESIS_FILE_NAME};
use std::fs::File;
use std::io;
use zip::ZipArchive;
const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[
altona::ETH2_NET_DIR,
medalla::ETH2_NET_DIR,
spadina::ETH2_NET_DIR,
mainnet::ETH2_NET_DIR,
pyrmont::ETH2_NET_DIR,
toledo::ETH2_NET_DIR,
prater::ETH2_NET_DIR,
];

View File

@ -1,10 +0,0 @@
- enr:-LK4QFtV7Pz4reD5a7cpfi1z6yPrZ2I9eMMU5mGQpFXLnLoKZW8TXvVubShzLLpsEj6aayvVO1vFx-MApijD3HLPhlECh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD6etXjAAABIf__________gmlkgnY0gmlwhDMPYfCJc2VjcDI1NmsxoQIerw_qBc9apYfZqo2awiwS930_vvmGnW2psuHsTzrJ8YN0Y3CCIyiDdWRwgiMo
- enr:-LK4QPVkFd_MKzdW0219doTZryq40tTe8rwWYO75KDmeZM78fBskGsfCuAww9t8y3u0Q0FlhXOhjE1CWpx3SGbUaU80Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpD6etXjAAABIf__________gmlkgnY0gmlwhDMPRgeJc2VjcDI1NmsxoQNHu-QfNgzl8VxbMiPgv6wgAljojnqAOrN18tzJMuN8oYN0Y3CCIyiDdWRwgiMo
- enr:-LK4QHe52XPPrcv6-MvcmN5GqDe_sgCwo24n_2hedlfwD_oxNt7cXL3tXJ7h9aYv6CTS1C_H2G2_dkeqm_LBO9nrpiYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhANzD9uJc2VjcDI1NmsxoQJX7zMnRU3szfGfS8MAIfPaQKOBpu3sBVTXf4Qq0b_m-4N0Y3CCIyiDdWRwgiMo
- enr:-LK4QLkbbq7xuRa_EnWd_kc0TkQk0pd0B0cZYR5LvBsncFQBDyPbGdy8d24TzRVeK7ZWwM5_2EcSJK223f8TYUOQYfwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhAPsjtOJc2VjcDI1NmsxoQJNw_aZgWXl2SstD--WAjooGudjWLjEbbCIddJuEPxzWYN0Y3CCIyiDdWRwgiMo
- enr:-LK4QHy-glnxN1WTk5f6d7-xXwy_UKJLs5k7p_S4KRY9I925KTzW_kQLjfFriIpH0de7kygBwrSl726ukq9_OG_sgKMCh2F0dG5ldHOIUjEAIQEAFMiEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhBLmhrCJc2VjcDI1NmsxoQNlU7gT0HUvpLA41n-P5GrCgjwMwtG02YsRRO0lAmpmBYN0Y3CCIyiDdWRwgiMo
- enr:-LK4QDz0n0vpyOpuStB8e22h9ayHVcvmN7o0trC7eC0DnZV9GYGzK5uKv7WlzpMQM2nDTG43DWvF_DZYwJOZCbF4iCQBh2F0dG5ldHOI__________-EZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhBKN136Jc2VjcDI1NmsxoQP5gcOUcaruHuMuTv8ht7ZEawp3iih7CmeLqcoY1hxOnoN0Y3CCIyiDdWRwgiMo
- enr:-LK4QOScOZ35sOXEH6CEW15lfv7I3DhqQAzCPQ_nRav95otuSh4yi9ol0AruKDiIk9qqGXyD-wQDaBAPLhwl4t-rUSQBh2F0dG5ldHOI__________-EZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhCL68KuJc2VjcDI1NmsxoQK5fYR3Ipoc01dz0d2-EcL7m26zKQSkAbf4rwcMMM09CoN0Y3CCIyiDdWRwgiMo
- enr:-Ku4QMqmWPFkgM58F16wxB50cqWDaWaIsyANHL8wUNSB4Cy1TP9__uJQNRODvx_dvO6rY-BT3psrYTMAaxnMGXb6DuoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQNoed9JnQh7ltcAacHEGOjwocL1BhMQbYTgaPX0kFuXtIN1ZHCCE4g
- enr:-LK4QDHu6BtDKnGbthNp-GvweQlW0jiOX9KFCj5Ql9kScrFed76tgHlFv7A-9ZRB-EVZpKItvlNjo3yxjj7jYIZUJa4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhDbUyQKJc2VjcDI1NmsxoQLV6Yse8baXDFu9r_dvm9BVd2ni2-wwvANWA-4ewbhniIN0Y3CCIyiDdWRwgiMo
- enr:-LK4QF3lT3Ch8Ljyx-KwoPrvoJHO-HDd3jOREMIZCWzi_HkHFVub5qt52MliDTLDgpXMS9tBzzLI4ObT_Z2m2Kus9vMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhBKNqHeJc2VjcDI1NmsxoQOTO9uI9UZjuTOpcWvnCfhfQTmcMaIzBFsjMpXYnppET4N0Y3CCIyiDdWRwgiMo

View File

@ -1,60 +0,0 @@
CONFIG_NAME: "altona"
MAX_COMMITTEES_PER_SLOT: 64
TARGET_COMMITTEE_SIZE: 128
MAX_VALIDATORS_PER_COMMITTEE: 2048
MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
SHUFFLE_ROUND_COUNT: 90
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 640
MIN_GENESIS_TIME: 1593433800
HYSTERESIS_QUOTIENT: 4
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
ETH1_FOLLOW_DISTANCE: 1024
TARGET_AGGREGATORS_PER_COMMITTEE: 16
RANDOM_SUBNETS_PER_VALIDATOR: 1
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
SECONDS_PER_ETH1_BLOCK: 14
DEPOSIT_CONTRACT_ADDRESS: 0x16e82D77882A663454Ef92806b7DeCa1D394810f
MIN_DEPOSIT_AMOUNT: 1000000000
MAX_EFFECTIVE_BALANCE: 32000000000
EJECTION_BALANCE: 16000000000
EFFECTIVE_BALANCE_INCREMENT: 1000000000
GENESIS_FORK_VERSION: 0x00000121
BLS_WITHDRAWAL_PREFIX: 0x00
GENESIS_DELAY: 172800
SECONDS_PER_SLOT: 12
MIN_ATTESTATION_INCLUSION_DELAY: 1
SLOTS_PER_EPOCH: 32
MIN_SEED_LOOKAHEAD: 1
MAX_SEED_LOOKAHEAD: 4
EPOCHS_PER_ETH1_VOTING_PERIOD: 32
SLOTS_PER_HISTORICAL_ROOT: 8192
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
SHARD_COMMITTEE_PERIOD: 256
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
EPOCHS_PER_HISTORICAL_VECTOR: 65536
EPOCHS_PER_SLASHINGS_VECTOR: 8192
HISTORICAL_ROOTS_LIMIT: 16777216
VALIDATOR_REGISTRY_LIMIT: 1099511627776
BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
PROPOSER_REWARD_QUOTIENT: 8
INACTIVITY_PENALTY_QUOTIENT: 16777216
MIN_SLASHING_PENALTY_QUOTIENT: 32
MAX_PROPOSER_SLASHINGS: 16
MAX_ATTESTER_SLASHINGS: 2
MAX_ATTESTATIONS: 128
MAX_DEPOSITS: 16
MAX_VOLUNTARY_EXITS: 16
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
PROPORTIONAL_SLASHING_MULTIPLIER: 3

View File

@ -1,51 +1,66 @@
# Mainnet preset
# Mainnet config
CONFIG_NAME: "mainnet"
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
# Misc
# Genesis
# ---------------------------------------------------------------
# 2**6 (= 64)
MAX_COMMITTEES_PER_SLOT: 64
# 2**7 (= 128)
TARGET_COMMITTEE_SIZE: 128
# 2**11 (= 2,048)
MAX_VALIDATORS_PER_COMMITTEE: 2048
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Dec 1, 2020, 12pm UTC
MIN_GENESIS_TIME: 1606824000
# 4
HYSTERESIS_QUOTIENT: 4
# 1 (minus 0.25)
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Mainnet initial fork version, recommend altering for testnets
GENESIS_FORK_VERSION: 0x00000000
# 604800 seconds (7 days)
GENESIS_DELAY: 604800
# Fork Choice
# Forking
# ---------------------------------------------------------------
# 2**3 (= 8)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x01000000
ALTAIR_FORK_EPOCH: 18446744073709551615
# Merge
MERGE_FORK_VERSION: 0x02000000
MERGE_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x03000000
SHARDING_FORK_EPOCH: 18446744073709551615
# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D.
TRANSITION_TOTAL_DIFFICULTY: 4294967296
# Validator
# Time parameters
# ---------------------------------------------------------------
# 2**11 (= 2,048)
ETH1_FOLLOW_DISTANCE: 2048
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Deposit contract
@ -53,103 +68,4 @@ SECONDS_PER_ETH1_BLOCK: 14
# Ethereum PoW Mainnet
DEPOSIT_CHAIN_ID: 1
DEPOSIT_NETWORK_ID: 1
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
# Mainnet initial fork version, recommend altering for testnets
GENESIS_FORK_VERSION: 0x00000000
BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# 604800 seconds (7 days)
GENESIS_DELAY: 604800
# 12 seconds
SECONDS_PER_SLOT: 12
# 2**0 (= 1) slots 12 seconds
MIN_ATTESTATION_INCLUSION_DELAY: 1
# 2**5 (= 32) slots 6.4 minutes
SLOTS_PER_EPOCH: 32
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
MAX_SEED_LOOKAHEAD: 4
# 2**6 (= 64) epochs ~6.8 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
# 2**13 (= 8,192) slots ~13 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State vector lengths
# ---------------------------------------------------------------
# 2**16 (= 65,536) epochs ~0.8 years
EPOCHS_PER_HISTORICAL_VECTOR: 65536
# 2**13 (= 8,192) epochs ~36 days
EPOCHS_PER_SLASHINGS_VECTOR: 8192
# 2**24 (= 16,777,216) historical roots, ~26,131 years
HISTORICAL_ROOTS_LIMIT: 16777216
# 2**40 (= 1,099,511,627,776) validator spots
VALIDATOR_REGISTRY_LIMIT: 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# 2**6 (= 64)
BASE_REWARD_FACTOR: 64
# 2**9 (= 512)
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**26 (= 67,108,864)
INACTIVITY_PENALTY_QUOTIENT: 67108864
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
MIN_SLASHING_PENALTY_QUOTIENT: 128
# 1 (lower safety margin at Phase 0 genesis)
PROPORTIONAL_SLASHING_MULTIPLIER: 1
# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**1 (= 2)
MAX_ATTESTER_SLASHINGS: 2
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000

View File

@ -1,10 +0,0 @@
# lighthouse Node
- enr:-LK4QCGFeQXjpQkgOfLHsbTjD65IOtSqV7Qo-Qdqv6SrL8lqFY7INPMMGP5uGKkVDcJkeXimSeNeypaZV3MHkcJgr9QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhA37LMaJc2VjcDI1NmsxoQJ7k0mKtTd_kdEq251flOjD1HKpqgMmIETDoD-Msy_O-4N0Y3CCIyiDdWRwgiMo
# Lighthouse node
- enr:-LK4QCpyWmMLYwC2umMJ_g0c9VY7YOFwZyaR80_tuQNTWOzJbaR82DDhVQYqmE_0gvN6Du5jwnxzIaaNRZQlVXzfIK0Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhCLR2xuJc2VjcDI1NmsxoQOYiWqrQtQksTEtS3qY6idxJE5wkm0t9wKqpzv2gCR21oN0Y3CCIyiDdWRwgiMo
# Prysm
- enr:-Ku4QOnVSyvzS3VbF87J8MubaRuTyfPi6B67XQg6-5eAV_uILAhn9geTTQmfqDIOcIeAxWHUUajQp6lYniAXPWncp6UBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQKekYKqUtwbaJKKCct_srE5-g7tBUm68mj_jpeSb7CCqYN1ZHCCC7g
# Prysm
- enr:-Ku4QHWezvidY_m0dWEwERrNrqjEQWrlIx7b8K4EIxGgTrLmUxHCZPW5-t8PsS8nFxAJ8k8YacKP5zPRk5gbsTSsRTQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMypP_ODwTuBq2v0oIdjPGCEyu9Hb_jHDbuIX_iNvBRGoN1ZHCCGWQ
# Cat-dog
- enr:-Ku4QJmPsyq4lmDdFebMKXk7vdt8WsLWkArYT2K8eN057oFudm2tITrZJD9sq1x92-bRmXTyAJgb2FD4ior-KHIU3KcDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDaNQiCAAAAA___________gmlkgnY0gmlwhBK4vdCJc2VjcDI1NmsxoQMWAsR84_ETgq4-14FV2x00ptmI-YU3tdkZV9CUgYPEnIN1ZHCCI1s

View File

@ -1,60 +0,0 @@
CONFIG_NAME: "medalla"
MAX_COMMITTEES_PER_SLOT: 64
TARGET_COMMITTEE_SIZE: 128
MAX_VALIDATORS_PER_COMMITTEE: 2048
MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
SHUFFLE_ROUND_COUNT: 90
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
MIN_GENESIS_TIME: 1596546000
HYSTERESIS_QUOTIENT: 4
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
ETH1_FOLLOW_DISTANCE: 1024
TARGET_AGGREGATORS_PER_COMMITTEE: 16
RANDOM_SUBNETS_PER_VALIDATOR: 1
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
SECONDS_PER_ETH1_BLOCK: 14
DEPOSIT_CONTRACT_ADDRESS: 0x07b39F4fDE4A38bACe212b546dAc87C58DfE3fDC
MIN_DEPOSIT_AMOUNT: 1000000000
MAX_EFFECTIVE_BALANCE: 32000000000
EJECTION_BALANCE: 16000000000
EFFECTIVE_BALANCE_INCREMENT: 1000000000
GENESIS_FORK_VERSION: 0x00000001
BLS_WITHDRAWAL_PREFIX: 0x00
GENESIS_DELAY: 172800
SECONDS_PER_SLOT: 12
MIN_ATTESTATION_INCLUSION_DELAY: 1
SLOTS_PER_EPOCH: 32
MIN_SEED_LOOKAHEAD: 1
MAX_SEED_LOOKAHEAD: 4
EPOCHS_PER_ETH1_VOTING_PERIOD: 32
SLOTS_PER_HISTORICAL_ROOT: 8192
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
SHARD_COMMITTEE_PERIOD: 256
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
EPOCHS_PER_HISTORICAL_VECTOR: 65536
EPOCHS_PER_SLASHINGS_VECTOR: 8192
HISTORICAL_ROOTS_LIMIT: 16777216
VALIDATOR_REGISTRY_LIMIT: 1099511627776
BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
PROPOSER_REWARD_QUOTIENT: 8
INACTIVITY_PENALTY_QUOTIENT: 16777216
MIN_SLASHING_PENALTY_QUOTIENT: 32
MAX_PROPOSER_SLASHINGS: 16
MAX_ATTESTER_SLASHINGS: 2
MAX_ATTESTATIONS: 128
MAX_DEPOSITS: 16
MAX_VOLUNTARY_EXITS: 16
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
PROPORTIONAL_SLASHING_MULTIPLIER: 3

View File

@ -1,53 +1,66 @@
# Prater preset
# Prater config
CONFIG_NAME: "prater"
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
# Misc
# Genesis
# ---------------------------------------------------------------
# 2**6 (= 64)
MAX_COMMITTEES_PER_SLOT: 64
# 2**7 (= 128)
TARGET_COMMITTEE_SIZE: 128
# 2**11 (= 2,048)
MAX_VALIDATORS_PER_COMMITTEE: 2048
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Mar-01-2021 08:53:32 AM +UTC
MIN_GENESIS_TIME: 1614588812
# 4
HYSTERESIS_QUOTIENT: 4
# 1 (minus 0.25)
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Prater area code (Vienna)
GENESIS_FORK_VERSION: 0x00001020
# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC)
GENESIS_DELAY: 1919188
# Fork Choice
# Forking
# ---------------------------------------------------------------
# 2**3 (= 8)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x01000000
ALTAIR_FORK_EPOCH: 18446744073709551615
# Merge
MERGE_FORK_VERSION: 0x02000000
MERGE_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x03000000
SHARDING_FORK_EPOCH: 18446744073709551615
# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D.
TRANSITION_TOTAL_DIFFICULTY: 4294967296
# Validator
# Time parameters
# ---------------------------------------------------------------
# 2**11 (= 2,048)
ETH1_FOLLOW_DISTANCE: 2048
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Deposit contract
# ---------------------------------------------------------------
# Ethereum Goerli testnet
@ -55,101 +68,3 @@ DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
# Prater test deposit contract on Goerli Testnet
DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
# Prater area code (Vienna)
GENESIS_FORK_VERSION: 0x00001020
BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC)
GENESIS_DELAY: 1919188
# 12 seconds
SECONDS_PER_SLOT: 12
# 2**0 (= 1) slots 12 seconds
MIN_ATTESTATION_INCLUSION_DELAY: 1
# 2**5 (= 32) slots 6.4 minutes
SLOTS_PER_EPOCH: 32
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
MAX_SEED_LOOKAHEAD: 4
# 2**6 (= 64) epochs ~6.8 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
# 2**13 (= 8,192) slots ~13 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State vector lengths
# ---------------------------------------------------------------
# 2**16 (= 65,536) epochs ~0.8 years
EPOCHS_PER_HISTORICAL_VECTOR: 65536
# 2**13 (= 8,192) epochs ~36 days
EPOCHS_PER_SLASHINGS_VECTOR: 8192
# 2**24 (= 16,777,216) historical roots, ~26,131 years
HISTORICAL_ROOTS_LIMIT: 16777216
# 2**40 (= 1,099,511,627,776) validator spots
VALIDATOR_REGISTRY_LIMIT: 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# 2**6 (= 64)
BASE_REWARD_FACTOR: 64
# 2**9 (= 512)
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**26 (= 67,108,864)
INACTIVITY_PENALTY_QUOTIENT: 67108864
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
MIN_SLASHING_PENALTY_QUOTIENT: 128
# 1 (lower safety margin at Phase 0 genesis)
PROPORTIONAL_SLASHING_MULTIPLIER: 1
# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**1 (= 2)
MAX_ATTESTER_SLASHINGS: 2
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000

View File

@ -1,52 +1,66 @@
# Pyrmont preset
CONFIG_NAME: "pyrmont"
# Pyrmont config
# Misc
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
# Genesis
# ---------------------------------------------------------------
# 2**6 (= 64)
MAX_COMMITTEES_PER_SLOT: 64
# 2**7 (= 128)
TARGET_COMMITTEE_SIZE: 128
# 2**11 (= 2,048)
MAX_VALIDATORS_PER_COMMITTEE: 2048
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Nov 18, 2020, 12pm UTC
MIN_GENESIS_TIME: 1605700800
# 4
HYSTERESIS_QUOTIENT: 4
# 1 (minus 0.25)
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# Pyrmont area code
GENESIS_FORK_VERSION: 0x00002009
# Customized for Pyrmont: 432000 seconds (5 days)
GENESIS_DELAY: 432000
# Fork Choice
# Forking
# ---------------------------------------------------------------
# 2**3 (= 8)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x01000000
ALTAIR_FORK_EPOCH: 18446744073709551615
# Merge
MERGE_FORK_VERSION: 0x02000000
MERGE_FORK_EPOCH: 18446744073709551615
# Sharding
SHARDING_FORK_VERSION: 0x03000000
SHARDING_FORK_EPOCH: 18446744073709551615
# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D.
TRANSITION_TOTAL_DIFFICULTY: 4294967296
# Validator
# Time parameters
# ---------------------------------------------------------------
# 2**11 (= 2,048)
ETH1_FOLLOW_DISTANCE: 2048
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Deposit contract
# ---------------------------------------------------------------
# Ethereum Goerli testnet
@ -54,101 +68,3 @@ DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
# Pyrmont test deposit contract on Goerli (2nd edition, 0x00002009 fork version)
DEPOSIT_CONTRACT_ADDRESS: 0x8c5fecdC472E27Bc447696F431E425D02dd46a8c
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
EFFECTIVE_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
# Pyrmont area code
GENESIS_FORK_VERSION: 0x00002009
BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# Customized for Pyrmont: 432000 seconds (5 days)
GENESIS_DELAY: 432000
# 12 seconds
SECONDS_PER_SLOT: 12
# 2**0 (= 1) slots 12 seconds
MIN_ATTESTATION_INCLUSION_DELAY: 1
# 2**5 (= 32) slots 6.4 minutes
SLOTS_PER_EPOCH: 32
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
MAX_SEED_LOOKAHEAD: 4
# 2**6 (= 64) epochs ~6.8 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
# 2**13 (= 8,192) slots ~13 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
# State vector lengths
# ---------------------------------------------------------------
# 2**16 (= 65,536) epochs ~0.8 years
EPOCHS_PER_HISTORICAL_VECTOR: 65536
# 2**13 (= 8,192) epochs ~36 days
EPOCHS_PER_SLASHINGS_VECTOR: 8192
# 2**24 (= 16,777,216) historical roots, ~26,131 years
HISTORICAL_ROOTS_LIMIT: 16777216
# 2**40 (= 1,099,511,627,776) validator spots
VALIDATOR_REGISTRY_LIMIT: 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# 2**6 (= 64)
BASE_REWARD_FACTOR: 64
# 2**9 (= 512)
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**26 (= 67,108,864)
INACTIVITY_PENALTY_QUOTIENT: 67108864
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
MIN_SLASHING_PENALTY_QUOTIENT: 128
# 1 (lower safety margin at Phase 0 genesis)
PROPORTIONAL_SLASHING_MULTIPLIER: 1
# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**1 (= 2)
MAX_ATTESTER_SLASHINGS: 2
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000

View File

@ -1,8 +0,0 @@
# Lighthouse
- enr:-KG4QEPVpcw8HLNsDuqNPIx4sXLCUsuDOHVtPcNmgSewWcDgSkd6s-vGCXlac86BTYIU8sYqhvD-ZeTW1uG5OtEBm-QDhGV0aDKQCfsKEgAAAAL__________4JpZIJ2NIJpcIQ0ECjWiXNlY3AyNTZrMaEDCavdC37lb2fgBgKrvrLRZ-ZvL6JFNeUHHc5TXZ_BYqmDdGNwgiMog3VkcIIjKA
# teku
- enr:-KG4QA-EcFfXQsL2dcneG8vp8HTWLrpwHQ5HhfyIytfpeKOISzROy2kYSsf_v-BZKnIx5XHDjqJ-ttz0hoz6qJA7tasEhGV0aDKQxKgkDQAAAAL__________4JpZIJ2NIJpcIQDFt-UiXNlY3AyNTZrMaECkR4C5DVO_9rB48eHTY4kdyOHsguTEDlvb7Ce0_mvghSDdGNwgiMog3VkcIIjKA
# prysm
- enr:-Ku4QGQJf2bcDAwVGvbvtq3AB4KKwAvStTenY-i_QnW2ABNRRBncIU_5qR_e_um-9t3s9g-Y5ZfFATj1nhtzq6lvgc4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDEqCQNAAAAAv__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQNoed9JnQh7ltcAacHEGOjwocL1BhMQbYTgaPX0kFuXtIN1ZHCCE4g
# proto
- enr:-Ku4QFW1SLbtzJ_ghQQC8-8xezvZ1Mx95J-zer9IPmDE2BKeD_SM7j4vH6xmroUFVuyK-54n2Ey2ueB-Lf-fkbcLwAQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDEqCQNAAAAAv__________gmlkgnY0gmlwhGQZkSyJc2VjcDI1NmsxoQJMcbZhTCEKYSH5-qPQPgYfSHHUMLGBAKU-f-96yYKFMIN1ZHCCIyg

View File

@ -1,60 +0,0 @@
CONFIG_NAME: "spadina"
MAX_COMMITTEES_PER_SLOT: 64
TARGET_COMMITTEE_SIZE: 128
MAX_VALIDATORS_PER_COMMITTEE: 2048
MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
SHUFFLE_ROUND_COUNT: 90
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1024
MIN_GENESIS_TIME: 1601380800
HYSTERESIS_QUOTIENT: 4
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
PROPORTIONAL_SLASHING_MULTIPLIER: 3
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
ETH1_FOLLOW_DISTANCE: 1024
TARGET_AGGREGATORS_PER_COMMITTEE: 16
RANDOM_SUBNETS_PER_VALIDATOR: 1
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
SECONDS_PER_ETH1_BLOCK: 14
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
DEPOSIT_CONTRACT_ADDRESS: 0x48B597F4b53C21B48AD95c7256B49D1779Bd5890
MIN_DEPOSIT_AMOUNT: 1000000000
MAX_EFFECTIVE_BALANCE: 32000000000
EJECTION_BALANCE: 16000000000
EFFECTIVE_BALANCE_INCREMENT: 1000000000
GENESIS_FORK_VERSION: 0x00000002
BLS_WITHDRAWAL_PREFIX: 0x00
GENESIS_DELAY: 172800
SECONDS_PER_SLOT: 12
MIN_ATTESTATION_INCLUSION_DELAY: 1
SLOTS_PER_EPOCH: 32
MIN_SEED_LOOKAHEAD: 1
MAX_SEED_LOOKAHEAD: 4
EPOCHS_PER_ETH1_VOTING_PERIOD: 32
SLOTS_PER_HISTORICAL_ROOT: 8192
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
SHARD_COMMITTEE_PERIOD: 256
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
EPOCHS_PER_HISTORICAL_VECTOR: 65536
EPOCHS_PER_SLASHINGS_VECTOR: 8192
HISTORICAL_ROOTS_LIMIT: 16777216
VALIDATOR_REGISTRY_LIMIT: 1099511627776
BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
PROPOSER_REWARD_QUOTIENT: 8
INACTIVITY_PENALTY_QUOTIENT: 16777216
MIN_SLASHING_PENALTY_QUOTIENT: 32
MAX_PROPOSER_SLASHINGS: 16
MAX_ATTESTER_SLASHINGS: 2
MAX_ATTESTATIONS: 128
MAX_DEPOSITS: 16
MAX_VOLUNTARY_EXITS: 16
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_SELECTION_PROOF: 0x05000000
DOMAIN_AGGREGATE_AND_PROOF: 0x06000000

View File

@ -1,20 +0,0 @@
# discv5.1-only bootnode @protolambda
- enr:-Ku4QL5E378NT4-vqP6v1mZ7kHxiTHJvuBvQixQsuTTCffa0PJNWMBlG3Mduvsvd6T2YP1U3l5tBKO5H-9wyX2SCtPkBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC4EvfsAHAe0P__________gmlkgnY0gmlwhDaetEeJc2VjcDI1NmsxoQKtGC2CAuba7goLLdle899M3esUmoWRvzi7GBVhq6ViCYN1ZHCCIyg
# lighthouse (Canada) @protolambda
- enr:-LK4QHLujdDjOwm2siyFJ2XGz19_ip-qTtozG3ceZ3_56G-LMWb4um67gTSYRJg0WsSkyvRMBEpz8uuIYl-7HfWvktgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCO3C5OJc2VjcDI1NmsxoQKXw9BLDY6YwmqTtfkzUnlJQb82UrlX4lIAnSSYWHFRlYN0Y3CCIyiDdWRwgiMo
# lighthouse (Sao Paulo) @protolambda
- enr:-LK4QMxmk7obupScBebKFaasSH3QmYUg-HaEmMAljfmGQCLbKwdOhszzx-VfVPvlH7bZZbOmg3-SNWbJsFfytdjD7a4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhBLkdWuJc2VjcDI1NmsxoQOwYsJyLOjJcDIqiQSSZtDi_EwwSaUjPBSnLVY_PYu-HoN0Y3CCIyiDdWRwgiMo
# Teku @protolambda
- enr:-KG4QKqo0mG4C35ntJg8icO54wd973aZ7aBiAnC2t1XkGvgqNDOEHwNe2ykxYVUj9AWjm_lKD7brlhXKCZEskGbie2cDhGV0aDKQl5uvZwBwHtD__________4JpZIJ2NIJpcIQNOThwiXNlY3AyNTZrMaECn1dwC8MRt8rk2VUT8RjzEBaceF09d4CEQI20O_SWYcqDdGNwgiMog3VkcIIjKA
# Prysm @protolambda
- enr:-LK4QAhU5smiLgU0AgrdFv8eCKmDPCBkXCMCIy8Aktaci5qvCYOsW98xVqJS6OoPWt4Sz_YoTdLQBWxd-RZ756vmGPMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhDTTDL2Jc2VjcDI1NmsxoQOmSJ0mKsQjab7Zralm1Hi0AEReZ2SEqYdKoOPmoA98DoN0Y3CCIyiDdWRwgiMo
# Lighthouse: @sigp
- enr:-LK4QBsu_4I-tmA5WgxkJWRuVUCj2_QE2mmrwX0sFvAc3NR_YPrub4kpvPCb_OjKLwEefxey81SAcvQ7mr2Vvh8xhbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhA3UHZWJc2VjcDI1NmsxoQL9FPylFeunleHuPXlbB938eIMd3X9y9cJ8ZI8y3Li0u4N0Y3CCIyiDdWRwgiMo
# Lighthouse: @sigp
- enr:-LK4QEfW9TCASUUy8L5xamlTVs3JbgT8iYOUspJkbh3rj-BuUndLjtonockiN2K_0g-cBQGq-wvsgAiz5Q3-ic-Wz_ABh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCLV8-OJc2VjcDI1NmsxoQKYJuiXbqPzkbT0NAKIJneNWiX0136HiYI9qtx5NF1IloN0Y3CCIyiDdWRwgiMo

View File

@ -1,23 +1,21 @@
use eth2_config::{predefined_networks_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{BeaconState, EthSpec, EthSpecId, YamlConfig};
use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
pub const BASE_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub genesis_state_bytes: &'static [u8],
@ -30,7 +28,7 @@ macro_rules! define_net {
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
@ -38,16 +36,11 @@ macro_rules! define_net {
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const PRATER: HardcodedNet = define_net!(prater, include_prater_file);
const HARDCODED_NETS: &[HardcodedNet] =
&[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO, PRATER];
const HARDCODED_NETS: &[HardcodedNet] = &[PYRMONT, MAINNET, PRATER];
pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet";
/// Specifies an Eth2 network.
@ -60,7 +53,7 @@ pub struct Eth2NetworkConfig {
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
pub config: Config,
}
impl Eth2NetworkConfig {
@ -85,24 +78,17 @@ impl Eth2NetworkConfig {
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
config: serde_yaml::from_reader(net.config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// network configuration.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
self.config
.eth_spec_id()
.ok_or_else(|| "Config does not match any known preset".to_string())
}
/// Returns `true` if this configuration contains a `BeaconState`.
@ -110,14 +96,25 @@ impl Eth2NetworkConfig {
self.genesis_state_bytes.is_some()
}
/// Construct a consolidated `ChainSpec` from the YAML config.
pub fn chain_spec<E: EthSpec>(&self) -> Result<ChainSpec, String> {
ChainSpec::from_config::<E>(&self.config).ok_or_else(|| {
format!(
"YAML configuration incompatible with spec constants for {}",
E::spec_name()
)
})
}
/// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let spec = self.chain_spec::<E>()?;
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or("Genesis state is unknown")?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
BeaconState::from_ssz_bytes(genesis_state_bytes, &spec)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
@ -167,9 +164,7 @@ impl Eth2NetworkConfig {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
write_to_yaml_file!(BASE_CONFIG_FILE, &self.config);
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
@ -210,7 +205,7 @@ impl Eth2NetworkConfig {
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
let config = load_from_file!(BASE_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
@ -232,7 +227,7 @@ impl Eth2NetworkConfig {
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
config,
})
}
}
@ -242,9 +237,16 @@ mod tests {
use super::*;
use ssz::Encode;
use tempfile::Builder as TempBuilder;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
use types::{Config, Eth1Data, Hash256, MainnetEthSpec};
type E = V012LegacyEthSpec;
type E = MainnetEthSpec;
#[test]
fn mainnet_config_eq_chain_spec() {
let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap();
let spec = ChainSpec::mainnet();
assert_eq!(spec, config.chain_spec::<E>().unwrap());
}
#[test]
fn hard_coded_nets_work() {
@ -252,27 +254,8 @@ mod tests {
let config = Eth2NetworkConfig::from_hardcoded_net(net)
.unwrap_or_else(|_| panic!("{:?}", net.name));
if net.name == "mainnet"
|| net.name == "toledo"
|| net.name == "pyrmont"
|| net.name == "prater"
{
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
// Ensure we can parse the YAML config to a chain spec.
config.chain_spec::<MainnetEthSpec>().unwrap();
assert_eq!(
config.genesis_state_bytes.is_some(),
@ -296,16 +279,16 @@ mod tests {
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
let config = Config::from_chain_spec::<E>(spec);
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
do_test::<E>(boot_enr, genesis_state, config.clone());
do_test::<E>(None, None, config);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
config: Config,
) {
let temp_dir = TempBuilder::new()
.prefix("eth2_testnet_test")
@ -318,7 +301,7 @@ mod tests {
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
config,
};
testnet

View File

@ -7,4 +7,4 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
itertools = "0.9.0"
itertools = "0.10.0"

View File

@ -244,8 +244,8 @@ where
genesis_block: &BeaconBlock<E>,
genesis_state: &BeaconState<E>,
) -> Result<Self, Error<T::Error>> {
let finalized_block_slot = genesis_block.slot;
let finalized_block_state_root = genesis_block.state_root;
let finalized_block_slot = genesis_block.slot();
let finalized_block_state_root = genesis_block.state_root();
let current_epoch_shuffling_id =
AttestationShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Current)
.map_err(Error::BeaconStateError)?;
@ -370,7 +370,7 @@ where
) -> Result<bool, Error<T::Error>> {
self.update_time(current_slot)?;
let new_justified_checkpoint = &state.current_justified_checkpoint;
let new_justified_checkpoint = &state.current_justified_checkpoint();
if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot())
< SAFE_SLOTS_TO_UPDATE_JUSTIFIED
@ -382,10 +382,10 @@ where
compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch);
// This sanity check is not in the spec, but the invariant is implied.
if justified_slot >= state.slot {
if justified_slot >= state.slot() {
return Err(Error::AttemptToRevertJustification {
store: justified_slot,
state: state.slot,
state: state.slot(),
});
}
@ -434,9 +434,9 @@ where
let current_slot = self.update_time(current_slot)?;
// Parent block must be known.
if !self.proto_array.contains_block(&block.parent_root) {
if !self.proto_array.contains_block(&block.parent_root()) {
return Err(Error::InvalidBlock(InvalidBlock::UnknownParent(
block.parent_root,
block.parent_root(),
)));
}
@ -444,10 +444,10 @@ where
// the are in the past.
//
// Note: presently, we do not delay consideration. We just drop the block.
if block.slot > current_slot {
if block.slot() > current_slot {
return Err(Error::InvalidBlock(InvalidBlock::FutureSlot {
current_slot,
block_slot: block.slot,
block_slot: block.slot(),
}));
}
@ -455,10 +455,10 @@ where
// get_ancestor).
let finalized_slot =
compute_start_slot_at_epoch::<E>(self.fc_store.finalized_checkpoint().epoch);
if block.slot <= finalized_slot {
if block.slot() <= finalized_slot {
return Err(Error::InvalidBlock(InvalidBlock::FinalizedSlot {
finalized_slot,
block_slot: block.slot,
block_slot: block.slot(),
}));
}
@ -471,7 +471,7 @@ where
// `self.proto_array` to do this search. See:
//
// https://github.com/ethereum/eth2.0-specs/pull/1884
let block_ancestor = self.get_ancestor(block.parent_root, finalized_slot)?;
let block_ancestor = self.get_ancestor(block.parent_root(), finalized_slot)?;
let finalized_root = self.fc_store.finalized_checkpoint().root;
if block_ancestor != Some(finalized_root) {
return Err(Error::InvalidBlock(InvalidBlock::NotFinalizedDescendant {
@ -481,24 +481,24 @@ where
}
// Update justified checkpoint.
if state.current_justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch {
if state.current_justified_checkpoint.epoch
if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch {
if state.current_justified_checkpoint().epoch
> self.fc_store.best_justified_checkpoint().epoch
{
self.fc_store
.set_best_justified_checkpoint(state.current_justified_checkpoint);
.set_best_justified_checkpoint(state.current_justified_checkpoint());
}
if self.should_update_justified_checkpoint(current_slot, state)? {
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.set_justified_checkpoint(state.current_justified_checkpoint())
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
// Update finalized checkpoint.
if state.finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch {
if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch {
self.fc_store
.set_finalized_checkpoint(state.finalized_checkpoint);
.set_finalized_checkpoint(state.finalized_checkpoint());
let finalized_slot =
compute_start_slot_at_epoch::<E>(self.fc_store.finalized_checkpoint().epoch);
@ -507,24 +507,24 @@ where
// information:
//
// https://github.com/ethereum/eth2.0-specs/pull/1880
if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint
&& (state.current_justified_checkpoint.epoch
if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint()
&& (state.current_justified_checkpoint().epoch
> self.fc_store.justified_checkpoint().epoch
|| self
.get_ancestor(self.fc_store.justified_checkpoint().root, finalized_slot)?
!= Some(self.fc_store.finalized_checkpoint().root))
{
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.set_justified_checkpoint(state.current_justified_checkpoint())
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
let target_slot = block
.slot
.slot()
.epoch(E::slots_per_epoch())
.start_slot(E::slots_per_epoch());
let target_root = if block.slot == target_slot {
let target_root = if block.slot() == target_slot {
block_root
} else {
*state
@ -539,9 +539,9 @@ where
// This does not apply a vote to the block, it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes.
self.proto_array.process_block(ProtoBlock {
slot: block.slot,
slot: block.slot(),
root: block_root,
parent_root: Some(block.parent_root),
parent_root: Some(block.parent_root()),
target_root,
current_epoch_shuffling_id: AttestationShufflingId::new(
block_root,
@ -555,9 +555,9 @@ where
RelativeEpoch::Next,
)
.map_err(Error::BeaconStateError)?,
state_root: block.state_root,
justified_epoch: state.current_justified_checkpoint.epoch,
finalized_epoch: state.finalized_checkpoint.epoch,
state_root: block.state_root(),
justified_epoch: state.current_justified_checkpoint().epoch,
finalized_epoch: state.finalized_checkpoint().epoch,
})?;
Ok(())

View File

@ -1,7 +1,12 @@
#![cfg(not(debug_assertions))]
use std::fmt;
use std::sync::Mutex;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError,
StateSkipConfig, WhenSlotSkipped,
};
@ -9,14 +14,12 @@ use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation,
SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
use std::fmt;
use std::sync::Mutex;
use store::{MemoryStore, StoreConfig};
use types::{
test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},
Checkpoint, Epoch, EthSpec, IndexedAttestation, MainnetEthSpec, Slot, SubnetId,
BeaconBlock, BeaconBlockRef, BeaconState, Checkpoint, Epoch, EthSpec, Hash256,
IndexedAttestation, MainnetEthSpec, Slot, SubnetId,
};
use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock};
pub type E = MainnetEthSpec;
@ -47,6 +50,7 @@ impl ForkChoiceTest {
pub fn new() -> Self {
let harness = BeaconChainHarness::new_with_target_aggregators(
MainnetEthSpec,
None,
generate_deterministic_keypairs(VALIDATOR_COUNT),
// Ensure we always have an aggregator for each slot.
u64::max_value(),
@ -60,6 +64,7 @@ impl ForkChoiceTest {
pub fn new_with_chain_config(chain_config: ChainConfig) -> Self {
let harness = BeaconChainHarness::new_with_chain_config(
MainnetEthSpec,
None,
generate_deterministic_keypairs(VALIDATOR_COUNT),
// Ensure we always have an aggregator for each slot.
u64::max_value(),
@ -170,7 +175,7 @@ impl ForkChoiceTest {
/// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error.
pub fn apply_blocks_while<F>(self, mut predicate: F) -> Result<Self, Self>
where
F: FnMut(&BeaconBlock<E>, &BeaconState<E>) -> bool,
F: FnMut(BeaconBlockRef<'_, E>, &BeaconState<E>) -> bool,
{
self.harness.advance_slot();
let mut state = self.harness.get_current_state();
@ -179,7 +184,7 @@ impl ForkChoiceTest {
let slot = self.harness.get_current_slot();
let (block, state_) = self.harness.make_block(state, slot);
state = state_;
if !predicate(&block.message, &state) {
if !predicate(block.message(), &state) {
break;
}
if let Ok(block_hash) = self.harness.process_block_result(block.clone()) {
@ -264,14 +269,15 @@ impl ForkChoiceTest {
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
func(&mut block.message, &mut state);
let (signed_block, mut state) = self.harness.make_block(state, slot);
let (mut block, _) = signed_block.deconstruct();
func(&mut block, &mut state);
let current_slot = self.harness.get_current_slot();
self.harness
.chain
.fork_choice
.write()
.on_block(current_slot, &block.message, block.canonical_root(), &state)
.on_block(current_slot, &block, block.canonical_root(), &state)
.unwrap();
self
}
@ -297,15 +303,16 @@ impl ForkChoiceTest {
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
mutation_func(&mut block.message, &mut state);
let (signed_block, mut state) = self.harness.make_block(state, slot);
let (mut block, _) = signed_block.deconstruct();
mutation_func(&mut block, &mut state);
let current_slot = self.harness.get_current_slot();
let err = self
.harness
.chain
.fork_choice
.write()
.on_block(current_slot, &block.message, block.canonical_root(), &state)
.on_block(current_slot, &block, block.canonical_root(), &state)
.err()
.expect("on_block did not return an error");
comparison_func(err);
@ -321,11 +328,11 @@ impl ForkChoiceTest {
let state_root = harness
.chain
.store
.get_item::<SignedBeaconBlock<E>>(&fc.fc_store().justified_checkpoint().root)
.get_block(&fc.fc_store().justified_checkpoint().root)
.unwrap()
.unwrap()
.message
.state_root;
.message()
.state_root();
let state = harness
.chain
.store
@ -333,7 +340,7 @@ impl ForkChoiceTest {
.unwrap()
.unwrap();
let balances = state
.validators
.validators()
.into_iter()
.map(|v| {
if v.is_active_at(state.current_epoch()) {
@ -401,7 +408,7 @@ impl ForkChoiceTest {
.sign(
&validator_sk,
validator_committee_index,
&head.beacon_state.fork,
&head.beacon_state.fork(),
self.harness.chain.genesis_validators_root,
&self.harness.chain.spec,
)
@ -467,7 +474,7 @@ fn is_safe_to_update(slot: Slot) -> bool {
#[test]
fn justified_checkpoint_updates_with_descendent_inside_safe_slots() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.move_inside_safe_to_update()
.assert_justified_epoch(0)
@ -481,7 +488,7 @@ fn justified_checkpoint_updates_with_descendent_inside_safe_slots() {
#[test]
fn justified_checkpoint_updates_with_descendent_outside_safe_slots() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch <= 2)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2)
.unwrap()
.move_outside_safe_to_update()
.assert_justified_epoch(2)
@ -496,7 +503,7 @@ fn justified_checkpoint_updates_with_descendent_outside_safe_slots() {
#[test]
fn justified_checkpoint_updates_first_justification_outside_safe_to_update() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.move_to_next_unsafe_period()
.assert_justified_epoch(0)
@ -512,19 +519,19 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() {
#[test]
fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.move_inside_safe_to_update()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should not change.
state.finalized_checkpoint.epoch = Epoch::new(0);
state.finalized_checkpoint().epoch = Epoch::new(0);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
state.current_justified_checkpoint_mut().epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
state.current_justified_checkpoint_mut().root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
@ -538,19 +545,19 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi
#[test]
fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.move_to_next_unsafe_period()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should not change.
state.finalized_checkpoint.epoch = Epoch::new(0);
state.finalized_checkpoint().epoch = Epoch::new(0);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
state.current_justified_checkpoint_mut().epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
state.current_justified_checkpoint_mut().root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
@ -564,19 +571,19 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f
#[test]
fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.move_to_next_unsafe_period()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should change.
state.finalized_checkpoint.epoch = Epoch::new(1);
state.finalized_checkpoint_mut().epoch = Epoch::new(1);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
state.current_justified_checkpoint_mut().epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
state.current_justified_checkpoint_mut().root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
@ -588,7 +595,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina
#[test]
fn justified_balances() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_justified_epoch(2)
@ -617,7 +624,7 @@ fn invalid_block_unknown_parent() {
.apply_blocks(2)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.parent_root = junk;
*block.parent_root_mut() = junk;
},
|err| {
assert_invalid_block!(
@ -638,7 +645,7 @@ fn invalid_block_future_slot() {
.apply_blocks(2)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.slot = block.slot + 1;
*block.slot_mut() += 1;
},
|err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. }),
);
@ -650,12 +657,12 @@ fn invalid_block_future_slot() {
#[test]
fn invalid_block_finalized_slot() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.slot = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1;
*block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1;
},
|err| {
assert_invalid_block!(
@ -670,7 +677,7 @@ fn invalid_block_finalized_slot() {
/// Specification v0.12.1
///
/// assert get_ancestor(store, hash_tree_root(block), finalized_slot) ==
/// store.finalized_checkpoint.root
/// store.finalized_checkpoint().root
///
/// Note: we technically don't do this exact check, but an equivalent check. Reference:
///
@ -680,16 +687,16 @@ fn invalid_block_finalized_descendant() {
let invalid_ancestor = Mutex::new(Hash256::zero());
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2)
.apply_invalid_block_directly_to_fork_choice(
|block, state| {
block.parent_root = *state
*block.parent_root_mut() = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
*invalid_ancestor.lock().unwrap() = block.parent_root;
*invalid_ancestor.lock().unwrap() = block.parent_root();
},
|err| {
assert_invalid_block!(
@ -966,7 +973,7 @@ fn valid_attestation_skip_across_epoch() {
#[test]
fn can_read_finalized_block() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.check_finalized_block_is_accessible();
@ -1004,7 +1011,7 @@ fn weak_subjectivity_pass_on_startup() {
#[test]
fn weak_subjectivity_check_passes() {
let setup_harness = ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2);
@ -1022,7 +1029,7 @@ fn weak_subjectivity_check_passes() {
};
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2)
@ -1032,7 +1039,7 @@ fn weak_subjectivity_check_passes() {
#[test]
fn weak_subjectivity_check_fails_early_epoch() {
let setup_harness = ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2);
@ -1052,7 +1059,7 @@ fn weak_subjectivity_check_fails_early_epoch() {
};
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 3)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3)
.unwrap_err()
.assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent();
@ -1061,7 +1068,7 @@ fn weak_subjectivity_check_fails_early_epoch() {
#[test]
fn weak_subjectivity_check_fails_late_epoch() {
let setup_harness = ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2);
@ -1081,7 +1088,7 @@ fn weak_subjectivity_check_fails_late_epoch() {
};
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 4)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 4)
.unwrap_err()
.assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent();
@ -1090,7 +1097,7 @@ fn weak_subjectivity_check_fails_late_epoch() {
#[test]
fn weak_subjectivity_check_fails_incorrect_root() {
let setup_harness = ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(2);
@ -1110,7 +1117,7 @@ fn weak_subjectivity_check_fails_incorrect_root() {
};
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 3)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3)
.unwrap_err()
.assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent();
@ -1120,7 +1127,7 @@ fn weak_subjectivity_check_fails_incorrect_root() {
fn weak_subjectivity_check_epoch_boundary_is_skip_slot() {
let setup_harness = ForkChoiceTest::new()
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap();
// get the head, it will become the finalized root of epoch 4
@ -1129,7 +1136,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() {
setup_harness
// epoch 3 will be entirely skip slots
.skip_slots(E::slots_per_epoch() as usize)
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(5);
@ -1147,10 +1154,10 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() {
// recreate the chain exactly
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.skip_slots(E::slots_per_epoch() as usize)
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(5)
@ -1161,7 +1168,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() {
fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
let setup_harness = ForkChoiceTest::new()
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap();
// get the head, it will become the finalized root of epoch 4
@ -1170,7 +1177,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
setup_harness
// epoch 3 will be entirely skip slots
.skip_slots(E::slots_per_epoch() as usize)
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5)
.unwrap()
.apply_blocks(1)
.assert_finalized_epoch(5);
@ -1188,10 +1195,10 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
// recreate the chain exactly
ForkChoiceTest::new_with_chain_config(chain_config.clone())
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.unwrap()
.skip_slots(E::slots_per_epoch() as usize)
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 6)
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6)
.unwrap_err()
.assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent();

View File

@ -70,6 +70,17 @@ macro_rules! define_mod {
pub value: T,
}
/// Compositional wrapper type that allows quotes or no quotes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MaybeQuoted<T>
where
T: From<$int> + Into<$int> + Copy + TryFrom<u64>,
{
#[serde(with = "self")]
pub value: T,
}
/// Serialize with quotes.
pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where

View File

@ -145,6 +145,31 @@ impl<'a> SszDecoderBuilder<'a> {
}
}
/// Registers a variable-length object as the next item in `bytes`, without specifying the
/// actual type.
///
/// ## Notes
///
/// Use of this function is generally discouraged since it cannot detect if some type changes
/// from variable to fixed length.
///
/// Use `Self::register_type` wherever possible.
pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> {
struct Anonymous;
impl Decode for Anonymous {
fn is_ssz_fixed_len() -> bool {
false
}
fn from_ssz_bytes(_bytes: &[u8]) -> Result<Self, DecodeError> {
unreachable!("Anonymous should never be decoded")
}
}
self.register_type::<Anonymous>()
}
/// Declares that some type `T` is the next item in `bytes`.
pub fn register_type<T: Decode>(&mut self) -> Result<(), DecodeError> {
if T::is_ssz_fixed_len() {
@ -277,6 +302,14 @@ impl<'a> SszDecoder<'a> {
pub fn decode_next<T: Decode>(&mut self) -> Result<T, DecodeError> {
T::from_ssz_bytes(self.items.remove(0))
}
/// Decodes the next item using the provided function.
pub fn decode_next_with<T, F>(&mut self, f: F) -> Result<T, DecodeError>
where
F: FnOnce(&'a [u8]) -> Result<T, DecodeError>,
{
f(self.items.remove(0))
}
}
/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte union index from `bytes`, where `bytes.len() >=

View File

@ -3,11 +3,9 @@
//!
//! Supports field attributes, see each derive macro for more information.
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput};
use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput};
/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields
/// that should not be serialized.
@ -57,7 +55,7 @@ fn should_skip_serializing(field: &syn::Field) -> bool {
})
}
/// Implements `ssz::Encode` for some `struct`.
/// Implements `ssz::Encode` for some `struct` or `enum`.
///
/// Fields are encoded in the order they are defined.
///
@ -68,17 +66,20 @@ fn should_skip_serializing(field: &syn::Field) -> bool {
pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let name = &item.ident;
let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
match &item.data {
syn::Data::Struct(s) => ssz_encode_derive_struct(&item, s),
syn::Data::Enum(s) => ssz_encode_derive_enum(&item, s),
_ => panic!("ssz_derive only supports structs and enums"),
}
}
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("ssz_derive only supports structs."),
};
fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream {
let name = &derive_input.ident;
let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl();
let field_idents = get_serializable_named_field_idents(&struct_data);
let field_idents_a = get_serializable_named_field_idents(&struct_data);
let field_types_a = get_serializable_field_types(&struct_data);
let field_idents = get_serializable_named_field_idents(struct_data);
let field_idents_a = get_serializable_named_field_idents(struct_data);
let field_types_a = get_serializable_field_types(struct_data);
let field_types_b = field_types_a.clone();
let field_types_d = field_types_a.clone();
let field_types_e = field_types_a.clone();
@ -152,6 +153,72 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
output.into()
}
/// Derive `Encode` for a restricted subset of all possible enum types.
///
/// Only supports:
/// - Enums with a single field per variant, where
/// - All fields are variably sized from an SSZ-perspective (not fixed size).
///
/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run
/// time* if the variable-size requirement isn't met.
fn ssz_encode_derive_enum(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream {
let name = &derive_input.ident;
let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl();
let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data
.variants
.iter()
.map(|variant| {
let variant_name = &variant.ident;
if variant.fields.len() != 1 {
panic!("ssz::Encode can only be derived for enums with 1 field per variant");
}
let pattern = quote! {
#name::#variant_name(ref inner)
};
let ty = &(&variant.fields).into_iter().next().unwrap().ty;
let type_assert = quote! {
!<#ty as ssz::Encode>::is_ssz_fixed_len()
};
(pattern, type_assert)
})
.unzip();
let output = quote! {
impl #impl_generics ssz::Encode for #name #ty_generics #where_clause {
fn is_ssz_fixed_len() -> bool {
assert!(
#(
#assert_exprs &&
)* true,
"not all enum variants are variably-sized"
);
false
}
fn ssz_bytes_len(&self) -> usize {
match self {
#(
#patterns => inner.ssz_bytes_len(),
)*
}
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
match self {
#(
#patterns => inner.ssz_append(buf),
)*
}
}
}
};
output.into()
}
/// Returns true if some field has an attribute declaring it should not be deserialized.
///
/// The field attribute is: `#[ssz(skip_deserializing)]`

Some files were not shown because too many files have changed in this diff Show More