Merge pull request #1227 from sigp/spec-v0.12

Update to spec v0.12
Paul Hauner 2020-06-28 20:03:49 +10:00 committed by GitHub
commit 95320f8ab0
209 changed files with 5462 additions and 3175 deletions

Cargo.lock (generated)

@ -45,40 +45,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"
[[package]]
name = "aes-ctr"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2e5b0458ea3beae0d1d8c0f3946564f8e10f90646cf78c06b4351052058d1ee"
dependencies = [
"aes-soft",
"aesni",
"ctr",
"stream-cipher",
]
[[package]]
name = "aes-soft"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d"
dependencies = [
"block-cipher-trait",
"byteorder",
"opaque-debug",
]
[[package]]
name = "aesni"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100"
dependencies = [
"block-cipher-trait",
"opaque-debug",
"stream-cipher",
]
[[package]]
name = "ahash"
version = "0.2.18"
@ -100,7 +66,7 @@ dependencies = [
[[package]]
name = "amcl"
version = "0.2.0"
source = "git+https://github.com/sigp/milagro_bls?tag=v1.0.1#2ccdd4b517c1ab3debe10277deed9d1b1cbbe9ce"
source = "git+https://github.com/sigp/milagro_bls?tag=v1.1.0#32c9f9382fc73f8976a00aca9773e6a322bb2c9e"
dependencies = [
"hex 0.3.2",
"lazy_static",
@ -271,6 +237,7 @@ dependencies = [
"bitvec",
"bls",
"bus",
"derivative",
"environment",
"eth1",
"eth2_config",
@ -278,6 +245,7 @@ dependencies = [
"eth2_ssz",
"eth2_ssz_derive",
"eth2_ssz_types",
"fork_choice",
"futures 0.3.5",
"genesis",
"integer-sqrt",
@ -289,7 +257,7 @@ dependencies = [
"merkle_proof",
"operation_pool",
"parking_lot 0.10.2",
"proto_array_fork_choice",
"proto_array",
"rand 0.7.3",
"rayon",
"safe_arith",
@ -300,6 +268,7 @@ dependencies = [
"slog",
"sloggers",
"slot_clock",
"smallvec 1.4.0",
"state_processing",
"store",
"tempfile",
@ -406,15 +375,6 @@ dependencies = [
"generic-array",
]
[[package]]
name = "block-cipher-trait"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774"
dependencies = [
"generic-array",
]
[[package]]
name = "block-padding"
version = "0.1.5"
@ -918,16 +878,6 @@ dependencies = [
"memchr",
]
[[package]]
name = "ctr"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "022cd691704491df67d25d006fe8eca083098253c4d43516c2206479c58c6736"
dependencies = [
"block-cipher-trait",
"stream-cipher",
]
[[package]]
name = "ctrlc"
version = "3.1.4"
@ -995,6 +945,17 @@ dependencies = [
"types",
]
[[package]]
name = "derivative"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "derive_arbitrary"
version = "0.4.4"
@ -1293,6 +1254,7 @@ dependencies = [
name = "eth2_key_derivation"
version = "0.1.0"
dependencies = [
"bls",
"hex 0.3.2",
"num-bigint-dig",
"ring",
@ -1305,6 +1267,7 @@ name = "eth2_keystore"
version = "0.1.0"
dependencies = [
"bls",
"eth2_key_derivation",
"eth2_ssz",
"hex 0.3.2",
"rand 0.7.3",
@ -1572,6 +1535,21 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "fork_choice"
version = "0.1.0"
dependencies = [
"beacon_chain",
"eth2_ssz",
"eth2_ssz_derive",
"proto_array",
"slot_clock",
"state_processing",
"store",
"tree_hash",
"types",
]
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
@ -2351,7 +2329,6 @@ dependencies = [
"libp2p-identify",
"libp2p-mplex",
"libp2p-noise",
"libp2p-secio",
"libp2p-swarm",
"libp2p-websocket",
"libp2p-yamux",
@ -2496,36 +2473,6 @@ dependencies = [
"zeroize",
]
[[package]]
name = "libp2p-secio"
version = "0.19.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b73f0cc119c83a5b619d6d11074a319fdb4aa4daf8088ade00d511418566e28"
dependencies = [
"aes-ctr",
"ctr",
"futures 0.3.5",
"hmac",
"js-sys",
"lazy_static",
"libp2p-core",
"log 0.4.8",
"parity-send-wrapper",
"pin-project",
"prost",
"prost-build",
"quicksink",
"rand 0.7.3",
"ring",
"rw-stream-sink",
"sha2",
"static_assertions",
"twofish",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "libp2p-swarm"
version = "0.19.0"
@ -2789,8 +2736,8 @@ dependencies = [
[[package]]
name = "milagro_bls"
version = "1.0.1"
source = "git+https://github.com/sigp/milagro_bls?tag=v1.0.1#2ccdd4b517c1ab3debe10277deed9d1b1cbbe9ce"
version = "1.1.0"
source = "git+https://github.com/sigp/milagro_bls?tag=v1.1.0#32c9f9382fc73f8976a00aca9773e6a322bb2c9e"
dependencies = [
"amcl",
"hex 0.4.2",
@ -2982,6 +2929,7 @@ dependencies = [
"error-chain",
"eth2_libp2p",
"eth2_ssz",
"eth2_ssz_types",
"exit-future",
"fnv",
"futures 0.3.5",
@ -3000,6 +2948,7 @@ dependencies = [
"sloggers",
"slot_clock",
"smallvec 1.4.0",
"state_processing",
"store",
"tempfile",
"tokio 0.2.21",
@ -3233,12 +3182,6 @@ dependencies = [
"serde",
]
[[package]]
name = "parity-send-wrapper"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f"
[[package]]
name = "parking_lot"
version = "0.9.0"
@ -3495,12 +3438,11 @@ dependencies = [
]
[[package]]
name = "proto_array_fork_choice"
name = "proto_array"
version = "0.2.0"
dependencies = [
"eth2_ssz",
"eth2_ssz_derive",
"parking_lot 0.10.2",
"serde",
"serde_derive",
"serde_yaml",
@ -3795,7 +3737,7 @@ dependencies = [
"futures 0.3.5",
"hex 0.4.2",
"operation_pool",
"proto_array_fork_choice",
"proto_array",
"reqwest",
"rest_types",
"serde",
@ -4681,15 +4623,6 @@ dependencies = [
"types",
]
[[package]]
name = "stream-cipher"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8131256a5896cabcf5eb04f4d6dacbe1aefda854b0d9896e09cb58829ec5638c"
dependencies = [
"generic-array",
]
[[package]]
name = "string"
version = "0.2.1"
@ -5344,17 +5277,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382"
[[package]]
name = "twofish"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1"
dependencies = [
"block-cipher-trait",
"byteorder",
"opaque-debug",
]
[[package]]
name = "typeable"
version = "0.1.2"
@ -5377,6 +5299,7 @@ dependencies = [
"compare_fields",
"compare_fields_derive",
"criterion",
"derivative",
"dirs",
"eth2_hashing",
"eth2_interop_keypairs",


@ -35,7 +35,8 @@ members = [
"consensus/cached_tree_hash",
"consensus/int_to_bytes",
"consensus/proto_array_fork_choice",
"consensus/fork_choice",
"consensus/proto_array",
"consensus/safe_arith",
"consensus/ssz",
"consensus/ssz_derive",


@ -37,7 +37,7 @@ Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.
Current development overview:
- Specification `v0.11.1` implemented, optimized and passing test vectors.
- Specification `v0.12.1` implemented, optimized and passing test vectors.
- Rust-native libp2p with Gossipsub and Discv5.
- RESTful JSON API via HTTP server.
- Events via WebSocket.


@ -15,6 +15,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
store = { path = "../store" }
parking_lot = "0.10.2"
lazy_static = "1.4.0"
smallvec = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
log = "0.4.8"
operation_pool = { path = "../operation_pool" }
@ -40,15 +41,14 @@ futures = "0.3.5"
genesis = { path = "../genesis" }
integer-sqrt = "0.1.3"
rand = "0.7.3"
proto_array_fork_choice = { path = "../../consensus/proto_array_fork_choice" }
proto_array = { path = "../../consensus/proto_array" }
lru = "0.5.1"
tempfile = "3.1.0"
bitvec = "0.17.4"
bls = { path = "../../crypto/bls" }
safe_arith = { path = "../../consensus/safe_arith" }
fork_choice = { path = "../../consensus/fork_choice" }
environment = { path = "../../lighthouse/environment" }
bus = "2.2.3"
derivative = "2.1.1"
itertools = "0.9.0"
[dev-dependencies]
lazy_static = "1.4.0"


@ -23,7 +23,7 @@
//! -------------------------------------
//! |
//! ▼
//! ForkChoiceVerifiedAttestation
//! impl SignatureVerifiedAttestation
//! ```
use crate::{
@ -52,7 +52,7 @@ use std::borrow::Cow;
use tree_hash::TreeHash;
use types::{
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot,
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
};
/// Returned when an attestation was not successfully verified. It might not have been verified for
@ -123,6 +123,11 @@ pub enum Error {
/// The attestation is attesting to a state that is later than itself. (Viz., attesting to the
/// future).
AttestsToFutureBlock { block: Slot, attestation: Slot },
/// The attestation was received on an invalid attestation subnet.
InvalidSubnetId {
received: SubnetId,
expected: SubnetId,
},
/// The attestation failed the `state_processing` verification stage.
Invalid(AttestationValidationError),
/// There was an error whilst processing the attestation. It is not known if it is valid or invalid.
@ -158,65 +163,21 @@ impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> {
}
}
/// Wraps an `indexed_attestation` that is valid for application to fork choice. The
/// `indexed_attestation` will have been generated via the `VerifiedAggregatedAttestation` or
/// `VerifiedUnaggregatedAttestation` wrappers.
pub struct ForkChoiceVerifiedAttestation<'a, T: BeaconChainTypes> {
indexed_attestation: &'a IndexedAttestation<T::EthSpec>,
}
/// A helper trait implemented on wrapper types that can be progressed to a state where they can be
/// verified for application to fork choice.
pub trait IntoForkChoiceVerifiedAttestation<'a, T: BeaconChainTypes> {
fn into_fork_choice_verified_attestation(
&'a self,
chain: &BeaconChain<T>,
) -> Result<ForkChoiceVerifiedAttestation<'a, T>, Error>;
pub trait SignatureVerifiedAttestation<T: BeaconChainTypes> {
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec>;
}
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
for VerifiedAggregatedAttestation<T>
{
/// Progresses the `VerifiedAggregatedAttestation` to a stage where it is valid for application
/// to the fork-choice rule (or not).
fn into_fork_choice_verified_attestation(
&'a self,
chain: &BeaconChain<T>,
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
ForkChoiceVerifiedAttestation::from_signature_verified_components(
&self.indexed_attestation,
chain,
)
impl<'a, T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedAggregatedAttestation<T> {
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
&self.indexed_attestation
}
}
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
for VerifiedUnaggregatedAttestation<T>
{
/// Progresses the `Attestation` to a stage where it is valid for application to the
/// fork-choice rule (or not).
fn into_fork_choice_verified_attestation(
&'a self,
chain: &BeaconChain<T>,
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
ForkChoiceVerifiedAttestation::from_signature_verified_components(
&self.indexed_attestation,
chain,
)
}
}
impl<'a, T: BeaconChainTypes> IntoForkChoiceVerifiedAttestation<'a, T>
for ForkChoiceVerifiedAttestation<'a, T>
{
/// Simply returns itself.
fn into_fork_choice_verified_attestation(
&'a self,
_: &BeaconChain<T>,
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
Ok(Self {
indexed_attestation: self.indexed_attestation,
})
impl<T: BeaconChainTypes> SignatureVerifiedAttestation<T> for VerifiedUnaggregatedAttestation<T> {
fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
&self.indexed_attestation
}
}
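For context, a minimal sketch (not part of this PR) of how downstream code can stay generic over either verified wrapper via the new trait. Only the `indexed_attestation` accessor shown above is assumed; `log_attester_count` is a hypothetical helper.

fn log_attester_count<T: BeaconChainTypes>(verified: &impl SignatureVerifiedAttestation<T>) {
    // Works for both `VerifiedAggregatedAttestation` and `VerifiedUnaggregatedAttestation`,
    // since each exposes the indexed attestation produced during signature verification.
    let indexed = verified.indexed_attestation();
    println!(
        "attestation targets epoch {} with {} attesting indices",
        indexed.data.target.epoch,
        indexed.attesting_indices.len()
    );
}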
@ -235,12 +196,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
// We do not queue future attestations for later processing.
verify_propagation_slot_range(chain, attestation)?;
// Ensure the aggregated attestation has not already been seen locally.
//
// TODO: this part of the code is not technically to spec, however I have raised a PR to
// change it:
//
// https://github.com/ethereum/eth2.0-specs/pull/1749
// Ensure the valid aggregated attestation has not already been seen locally.
let attestation_root = attestation.tree_hash_root();
if chain
.observed_attestations
@ -278,56 +234,36 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
// attestation and do not delay consideration for later.
verify_head_block_is_known(chain, &attestation)?;
let indexed_attestation = map_attestation_committee(chain, attestation, |committee| {
// Note: this clones the signature which is known to be a relatively slow operation.
//
// Future optimizations should remove this clone.
let selection_proof =
SelectionProof::from(signed_aggregate.message.selection_proof.clone());
if !selection_proof
.is_aggregator(committee.committee.len(), &chain.spec)
.map_err(|e| Error::BeaconChainError(e.into()))?
{
return Err(Error::InvalidSelectionProof { aggregator_index });
}
/*
* I have raised a PR that will likely get merged in v0.12.0:
*
* https://github.com/ethereum/eth2.0-specs/pull/1732
*
* If this PR gets merged, uncomment this code and remove the code below.
*
if !committee
.committee
.iter()
.any(|validator_index| *validator_index as u64 == aggregator_index)
{
return Err(Error::AggregatorNotInCommittee { aggregator_index });
}
*/
get_indexed_attestation(committee.committee, &attestation)
.map_err(|e| BeaconChainError::from(e).into())
})?;
// Ensure the aggregator is in the attestation.
//
// I've raised an issue with this here:
//
// https://github.com/ethereum/eth2.0-specs/pull/1732
//
// I suspect PR my will get merged in v0.12 and we'll need to delete this code and
// uncomment the code above.
if !indexed_attestation
.attesting_indices
.iter()
.any(|validator_index| *validator_index as u64 == aggregator_index)
{
return Err(Error::AggregatorNotInCommittee { aggregator_index });
// Ensure that the attestation has participants.
if attestation.aggregation_bits.is_zero() {
return Err(Error::EmptyAggregationBitfield);
}
let indexed_attestation =
map_attestation_committee(chain, attestation, |(committee, _)| {
// Note: this clones the signature which is known to be a relatively slow operation.
//
// Future optimizations should remove this clone.
let selection_proof =
SelectionProof::from(signed_aggregate.message.selection_proof.clone());
if !selection_proof
.is_aggregator(committee.committee.len(), &chain.spec)
.map_err(|e| Error::BeaconChainError(e.into()))?
{
return Err(Error::InvalidSelectionProof { aggregator_index });
}
// Ensure the aggregator is a member of the committee for which it is aggregating.
if !committee.committee.contains(&(aggregator_index as usize)) {
return Err(Error::AggregatorNotInCommittee { aggregator_index });
}
get_indexed_attestation(committee.committee, &attestation)
.map_err(|e| BeaconChainError::from(e).into())
})?;
// Ensure that all signatures are valid.
if !verify_signed_aggregate_signatures(chain, &signed_aggregate, &indexed_attestation)? {
return Err(Error::InvalidSignature);
}
@ -370,14 +306,6 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
chain.add_to_block_inclusion_pool(self)
}
/// A helper function to add this aggregate to `beacon_chain.fork_choice`.
pub fn add_to_fork_choice(
&self,
chain: &BeaconChain<T>,
) -> Result<ForkChoiceVerifiedAttestation<T>, Error> {
chain.apply_attestation_to_fork_choice(self)
}
/// Returns the underlying `attestation` for the `signed_aggregate`.
pub fn attestation(&self) -> &Attestation<T::EthSpec> {
&self.signed_aggregate.message.aggregate
@ -387,8 +315,12 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
/// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip
/// network.
///
/// `subnet_id` is the subnet from which we received this attestation. This function will
/// verify that it was received on the correct subnet.
pub fn verify(
attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId,
chain: &BeaconChain<T>,
) -> Result<Self, Error> {
// Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
@ -408,7 +340,23 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
// attestation and do not delay consideration for later.
verify_head_block_is_known(chain, &attestation)?;
let indexed_attestation = obtain_indexed_attestation(chain, &attestation)?;
let (indexed_attestation, committees_per_slot) =
obtain_indexed_attestation_and_committees_per_slot(chain, &attestation)?;
let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::<T::EthSpec>(
&indexed_attestation.data,
committees_per_slot,
&chain.spec,
)
.map_err(BeaconChainError::from)?;
// Ensure the attestation is from the correct subnet.
if subnet_id != expected_subnet_id {
return Err(Error::InvalidSubnetId {
received: subnet_id,
expected: expected_subnet_id,
});
}
let validator_index = *indexed_attestation
.attesting_indices
@ -475,114 +423,6 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
}
}
impl<'a, T: BeaconChainTypes> ForkChoiceVerifiedAttestation<'a, T> {
/// Returns `Ok(Self)` if the `attestation` is valid to be applied to the beacon chain fork
/// choice.
///
/// The supplied `indexed_attestation` MUST have a valid signature, this function WILL NOT
/// CHECK THE SIGNATURE. Use the `VerifiedAggregatedAttestation` or
/// `VerifiedUnaggregatedAttestation` structs to do signature verification.
fn from_signature_verified_components(
indexed_attestation: &'a IndexedAttestation<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, Error> {
// There is no point in processing an attestation with an empty bitfield. Reject
// it immediately.
//
// This is not in the specification, however it should be transparent to other nodes. We
// return early here to avoid wasting precious resources verifying the rest of it.
if indexed_attestation.attesting_indices.len() == 0 {
return Err(Error::EmptyAggregationBitfield);
}
let slot_now = chain.slot()?;
let epoch_now = slot_now.epoch(T::EthSpec::slots_per_epoch());
let target = indexed_attestation.data.target.clone();
// Attestation must be from the current or previous epoch.
if target.epoch > epoch_now {
return Err(Error::FutureEpoch {
attestation_epoch: target.epoch,
current_epoch: epoch_now,
});
} else if target.epoch + 1 < epoch_now {
return Err(Error::PastEpoch {
attestation_epoch: target.epoch,
current_epoch: epoch_now,
});
}
if target.epoch
!= indexed_attestation
.data
.slot
.epoch(T::EthSpec::slots_per_epoch())
{
return Err(Error::BadTargetEpoch);
}
// Attestation target must be for a known block.
if !chain.fork_choice.contains_block(&target.root) {
return Err(Error::UnknownTargetRoot(target.root));
}
// TODO: we're not testing an assert from the spec:
//
// `assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)`
//
// I think this check is redundant and I've raised an issue here:
//
// https://github.com/ethereum/eth2.0-specs/pull/1755
//
// To resolve this todo, observe the outcome of the above PR.
// Load the slot and state root for `attestation.data.beacon_block_root`.
//
// This indirectly checks to see if the `attestation.data.beacon_block_root` is in our fork
// choice. Any known, non-finalized block should be in fork choice, so this check
// immediately filters out attestations that attest to a block that has not been processed.
//
// Attestations must be for a known block. If the block is unknown, we simply drop the
// attestation and do not delay consideration for later.
let (block_slot, _state_root) = chain
.fork_choice
.block_slot_and_state_root(&indexed_attestation.data.beacon_block_root)
.ok_or_else(|| Error::UnknownHeadBlock {
beacon_block_root: indexed_attestation.data.beacon_block_root,
})?;
// TODO: currently we do not check the FFG source/target. This is what the spec dictates
// but it seems wrong.
//
// I have opened an issue on the specs repo for this:
//
// https://github.com/ethereum/eth2.0-specs/issues/1636
//
// We should revisit this code once that issue has been resolved.
// Attestations must not be for blocks in the future. If this is the case, the attestation
// should not be considered.
if block_slot > indexed_attestation.data.slot {
return Err(Error::AttestsToFutureBlock {
block: block_slot,
attestation: indexed_attestation.data.slot,
});
}
// Note: we're not checking the "attestations can only affect the fork choice of subsequent
// slots" part of the spec, we do this upstream.
Ok(Self {
indexed_attestation,
})
}
/// Returns the wrapped `IndexedAttestation`.
pub fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> {
&self.indexed_attestation
}
}
/// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain.
///
/// The block root may not be known for two reasons:
@ -599,6 +439,7 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
) -> Result<(), Error> {
if chain
.fork_choice
.read()
.contains_block(&attestation.data.beacon_block_root)
{
Ok(())
@ -691,8 +532,8 @@ pub fn verify_attestation_signature<T: BeaconChainTypes>(
/// includes three signatures:
///
/// - `signed_aggregate.signature`
/// - `signed_aggregate.signature.message.selection proof`
/// - `signed_aggregate.signature.message.aggregate.signature`
/// - `signed_aggregate.message.selection_proof`
/// - `signed_aggregate.message.aggregate.signature`
///
/// # Returns
///
@ -751,19 +592,23 @@ pub fn verify_signed_aggregate_signatures<T: BeaconChainTypes>(
Ok(verify_signature_sets(signature_sets))
}
/// Returns the `indexed_attestation` for the `attestation` using the public keys cached in the
/// `chain`.
pub fn obtain_indexed_attestation<T: BeaconChainTypes>(
/// Assists in readability.
type CommitteesPerSlot = u64;
/// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the
/// public keys cached in the `chain`.
pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
attestation: &Attestation<T::EthSpec>,
) -> Result<IndexedAttestation<T::EthSpec>, Error> {
map_attestation_committee(chain, attestation, |committee| {
) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| {
get_indexed_attestation(committee.committee, &attestation)
.map(|attestation| (attestation, committees_per_slot))
.map_err(|e| BeaconChainError::from(e).into())
})
}
/// Runs the `map_fn` with the committee for the given `attestation`.
/// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`.
///
/// This function exists in this odd "map" pattern because efficiently obtaining the committee for
/// an attestation can be complex. It might involve reading straight from the
@ -779,7 +624,7 @@ pub fn map_attestation_committee<'a, T, F, R>(
) -> Result<R, Error>
where
T: BeaconChainTypes,
F: Fn(BeaconCommittee) -> Result<R, Error>,
F: Fn((BeaconCommittee, CommitteesPerSlot)) -> Result<R, Error>,
{
let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
let target = &attestation.data.target;
@ -791,9 +636,10 @@ where
// processing an attestation that does not include our latest finalized block in its chain.
//
// We do not delay consideration for later, we simply drop the attestation.
let (target_block_slot, target_block_state_root) = chain
let target_block = chain
.fork_choice
.block_slot_and_state_root(&target.root)
.read()
.get_block(&target.root)
.ok_or_else(|| Error::UnknownTargetRoot(target.root))?;
// Obtain the shuffling cache, timing how long we wait.
@ -808,9 +654,10 @@ where
metrics::stop_timer(cache_wait_timer);
if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) {
let committees_per_slot = committee_cache.committees_per_slot();
committee_cache
.get_beacon_committee(attestation.data.slot, attestation.data.index)
.map(map_fn)
.map(|committee| map_fn((committee, committees_per_slot)))
.unwrap_or_else(|| {
Err(Error::NoCommitteeForSlotAndIndex {
slot: attestation.data.slot,
@ -826,15 +673,15 @@ where
chain.log,
"Attestation processing cache miss";
"attn_epoch" => attestation_epoch.as_u64(),
"target_block_epoch" => target_block_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
"target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
);
let state_read_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);
let mut state = chain
.get_state(&target_block_state_root, Some(target_block_slot))?
.ok_or_else(|| BeaconChainError::MissingBeaconState(target_block_state_root))?;
.get_state(&target_block.state_root, Some(target_block.slot))?
.ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?;
metrics::stop_timer(state_read_timer);
let state_skip_timer =
@ -873,9 +720,10 @@ where
metrics::stop_timer(committee_building_timer);
let committees_per_slot = committee_cache.committees_per_slot();
committee_cache
.get_beacon_committee(attestation.data.slot, attestation.data.index)
.map(map_fn)
.map(|committee| map_fn((committee, committees_per_slot)))
.unwrap_or_else(|| {
Err(Error::NoCommitteeForSlotAndIndex {
slot: attestation.data.slot,
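For reference, a self-contained sketch of the subnet mapping that the new `InvalidSubnetId` check relies on, following the spec's `compute_subnet_for_attestation` as of v0.12.x. The constants are mainnet values and the free function is purely illustrative; Lighthouse's real logic lives in `SubnetId::compute_subnet_for_attestation_data`.

const SLOTS_PER_EPOCH: u64 = 32;
const ATTESTATION_SUBNET_COUNT: u64 = 64;

/// Illustrative restatement of the spec's subnet assignment for an attestation.
fn compute_subnet(committees_per_slot: u64, slot: u64, committee_index: u64) -> u64 {
    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // With 4 committees per slot, slot 33 (second slot of its epoch), committee 2 maps to subnet 6.
    assert_eq!(compute_subnet(4, 33, 2), 6);
}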


@ -1,6 +1,6 @@
use crate::attestation_verification::{
Error as AttestationError, ForkChoiceVerifiedAttestation, IntoForkChoiceVerifiedAttestation,
VerifiedAggregatedAttestation, VerifiedUnaggregatedAttestation,
Error as AttestationError, SignatureVerifiedAttestation, VerifiedAggregatedAttestation,
VerifiedUnaggregatedAttestation,
};
use crate::block_verification::{
check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError,
@ -9,7 +9,6 @@ use crate::block_verification::{
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
use crate::events::{EventHandler, EventKind};
use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
use crate::head_tracker::HeadTracker;
use crate::metrics;
use crate::migrate::Migrate;
@ -17,21 +16,26 @@ use crate::naive_aggregation_pool::{Error as NaiveAggregationError, NaiveAggrega
use crate::observed_attestations::{Error as AttestationObservationError, ObservedAttestations};
use crate::observed_attesters::{ObservedAggregators, ObservedAttesters};
use crate::observed_block_producers::ObservedBlockProducers;
use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::persisted_fork_choice::PersistedForkChoice;
use crate::shuffling_cache::ShufflingCache;
use crate::snapshot_cache::SnapshotCache;
use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::BeaconForkChoiceStore;
use crate::BeaconSnapshot;
use fork_choice::ForkChoice;
use itertools::process_results;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::RwLock;
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use state_processing::per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
ProposerSlashingValidationError,
use state_processing::{
common::get_indexed_attestation, per_block_processing,
per_block_processing::errors::AttestationValidationError, per_slot_processing,
BlockSignatureStrategy, SigVerifiedOp,
};
use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy};
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::HashMap;
@ -43,6 +47,8 @@ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterato
use store::{Error as DBError, HotColdDB};
use types::*;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
// Text included in blocks.
// Must be 32-bytes or panic.
//
@ -82,9 +88,9 @@ pub enum ChainSegmentResult {
},
}
/// The accepted clock drift for nodes gossiping blocks and attestations (spec v0.11.0). See:
/// The accepted clock drift for nodes gossiping blocks and attestations. See:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/p2p-interface.md#configuration
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500);
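A small illustration, under the assumption (consistent with the linked p2p spec) that the disparity simply widens each slot's wall-clock acceptance window by 500ms on either side; the helper below is not Lighthouse code.

use std::time::Duration;

/// Widens a slot's [start, end] wall-clock window by the permitted clock disparity.
fn tolerant_window(
    slot_start: Duration,
    slot_end: Duration,
    disparity: Duration,
) -> (Duration, Duration) {
    (slot_start.saturating_sub(disparity), slot_end + disparity)
}

fn main() {
    // A 12-second slot starting at t = 120s is accepted from 119.5s to 132.5s.
    let (lo, hi) = tolerant_window(
        Duration::from_secs(120),
        Duration::from_secs(132),
        Duration::from_millis(500),
    );
    assert_eq!(lo, Duration::from_millis(119_500));
    assert_eq!(hi, Duration::from_millis(132_500));
}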
#[derive(Debug, PartialEq)]
@ -185,6 +191,12 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub observed_aggregators: ObservedAggregators<T::EthSpec>,
/// Maintains a record of which validators have proposed blocks for each slot.
pub observed_block_producers: ObservedBlockProducers<T::EthSpec>,
/// Maintains a record of which validators have submitted voluntary exits.
pub observed_voluntary_exits: ObservedOperations<SignedVoluntaryExit, T::EthSpec>,
/// Maintains a record of which validators we've seen proposer slashings for.
pub observed_proposer_slashings: ObservedOperations<ProposerSlashing, T::EthSpec>,
/// Maintains a record of which validators we've seen attester slashings for.
pub observed_attester_slashings: ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>,
/// Provides information from the Ethereum 1 (PoW) chain.
pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
/// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received.
@ -195,7 +207,9 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub genesis_validators_root: Hash256,
/// A state-machine that is updated with information from the network and chooses a canonical
/// head block.
pub fork_choice: ForkChoice<T>,
pub fork_choice: RwLock<
ForkChoice<BeaconForkChoiceStore<T::EthSpec, T::HotStore, T::ColdStore>, T::EthSpec>,
>,
/// A handler for events generated by the beacon chain.
pub event_handler: T::EventHandler,
/// Used to track the heads of the beacon chain.
@ -240,11 +254,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE);
let fork_choice = self.fork_choice.read();
self.store.put_item(
&Hash256::from_slice(&FORK_CHOICE_DB_KEY),
&self.fork_choice.as_ssz_container(),
&PersistedForkChoice {
fork_choice: fork_choice.to_persisted(),
fork_choice_store: fork_choice.fc_store().to_persisted(),
},
)?;
drop(fork_choice);
metrics::stop_timer(fork_choice_timer);
let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD);
@ -263,21 +284,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// This operation is typically slow and causes a lot of allocations. It should be used
/// sparingly.
pub fn persist_op_pool(&self) -> Result<(), Error> {
let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
self.store.put_item(
&Hash256::from_slice(&OP_POOL_DB_KEY),
&PersistedOperationPool::from_operation_pool(&self.op_pool),
)?;
metrics::stop_timer(timer);
Ok(())
}
/// Persists `self.eth1_chain` and its caches to disk.
pub fn persist_eth1_cache(&self) -> Result<(), Error> {
let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
if let Some(eth1_chain) = self.eth1_chain.as_ref() {
self.store.put_item(
@ -286,8 +305,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)?;
}
metrics::stop_timer(timer);
Ok(())
}
@ -859,12 +876,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn verify_unaggregated_attestation_for_gossip(
&self,
attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId,
) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> {
metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS);
let _timer =
metrics::start_timer(&metrics::UNAGGREGATED_ATTESTATION_GOSSIP_VERIFICATION_TIMES);
VerifiedUnaggregatedAttestation::verify(attestation, self).map(|v| {
VerifiedUnaggregatedAttestation::verify(attestation, subnet_id, self).map(|v| {
metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
v
})
@ -889,23 +907,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Accepts some attestation-type object and attempts to verify it in the context of fork
/// choice. If it is valid it is applied to `self.fork_choice`.
///
/// Common items that implement `IntoForkChoiceVerifiedAttestation`:
/// Common items that implement `SignatureVerifiedAttestation`:
///
/// - `VerifiedUnaggregatedAttestation`
/// - `VerifiedAggregatedAttestation`
/// - `ForkChoiceVerifiedAttestation`
pub fn apply_attestation_to_fork_choice<'a>(
&self,
unverified_attestation: &'a impl IntoForkChoiceVerifiedAttestation<'a, T>,
) -> Result<ForkChoiceVerifiedAttestation<'a, T>, AttestationError> {
let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE);
verified: &'a impl SignatureVerifiedAttestation<T>,
) -> Result<(), Error> {
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
let verified = unverified_attestation.into_fork_choice_verified_attestation(self)?;
let indexed_attestation = verified.indexed_attestation();
self.fork_choice
.process_indexed_attestation(indexed_attestation)
.map_err(|e| Error::from(e))?;
Ok(verified)
.write()
.on_attestation(self.slot()?, verified.indexed_attestation())
.map_err(Into::into)
}
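A minimal sketch (not from this PR) of the new call order for an unaggregated attestation received from gossip, using only methods visible in this diff; error types are flattened to `String` for brevity.

fn handle_gossip_attestation<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    attestation: Attestation<T::EthSpec>,
    subnet_id: SubnetId,
) -> Result<(), String> {
    // Gossip verification now also rejects attestations that arrived on the wrong subnet.
    let verified = chain
        .verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
        .map_err(|e| format!("gossip verification failed: {:?}", e))?;

    // There is no separate `ForkChoiceVerifiedAttestation` stage any more; the
    // signature-verified wrapper is applied to fork choice directly.
    chain
        .apply_attestation_to_fork_choice(&verified)
        .map_err(|e| format!("fork choice rejected attestation: {:?}", e))?;

    Ok(())
}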
/// Accepts an `VerifiedUnaggregatedAttestation` and attempts to apply it to the "naive
@ -1041,8 +1056,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// pivot block is the same as the current state's pivot block. If it is, then the
// attestation's shuffling is the same as the current state's.
// To account for skipped slots, find the first block at *or before* the pivot slot.
let fork_choice_lock = self.fork_choice.core_proto_array();
let fork_choice_lock = self.fork_choice.read();
let pivot_block_root = fork_choice_lock
.proto_array()
.core_proto_array()
.iter_block_roots(block_root)
.find(|(_, slot)| *slot <= pivot_slot)
.map(|(block_root, _)| block_root);
@ -1062,81 +1079,68 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}
/// Accept some exit and queue it for inclusion in an appropriate block.
pub fn process_voluntary_exit(
/// Verify a voluntary exit before allowing it to propagate on the gossip network.
pub fn verify_voluntary_exit_for_gossip(
&self,
exit: SignedVoluntaryExit,
) -> Result<(), ExitValidationError> {
match self.wall_clock_state() {
Ok(state) => {
if self.eth1_chain.is_some() {
self.op_pool.insert_voluntary_exit(exit, &state, &self.spec)
} else {
Ok(())
}
}
Err(e) => {
error!(
&self.log,
"Unable to process voluntary exit";
"error" => format!("{:?}", e),
"reason" => "no state"
);
Ok(())
}
) -> Result<ObservationOutcome<SignedVoluntaryExit>, Error> {
// NOTE: this could be more efficient if it avoided cloning the head state
let wall_clock_state = self.wall_clock_state()?;
Ok(self
.observed_voluntary_exits
.verify_and_observe(exit, &wall_clock_state, &self.spec)?)
}
/// Accept a pre-verified exit and queue it for inclusion in an appropriate block.
pub fn import_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit>) {
if self.eth1_chain.is_some() {
self.op_pool.insert_voluntary_exit(exit)
}
}
/// Verify a proposer slashing before allowing it to propagate on the gossip network.
pub fn verify_proposer_slashing_for_gossip(
&self,
proposer_slashing: ProposerSlashing,
) -> Result<ObservationOutcome<ProposerSlashing>, Error> {
let wall_clock_state = self.wall_clock_state()?;
Ok(self.observed_proposer_slashings.verify_and_observe(
proposer_slashing,
&wall_clock_state,
&self.spec,
)?)
}
/// Accept some proposer slashing and queue it for inclusion in an appropriate block.
pub fn process_proposer_slashing(
&self,
proposer_slashing: ProposerSlashing,
) -> Result<(), ProposerSlashingValidationError> {
match self.wall_clock_state() {
Ok(state) => {
if self.eth1_chain.is_some() {
self.op_pool
.insert_proposer_slashing(proposer_slashing, &state, &self.spec)
} else {
Ok(())
}
}
Err(e) => {
error!(
&self.log,
"Unable to process proposer slashing";
"error" => format!("{:?}", e),
"reason" => "no state"
);
Ok(())
}
pub fn import_proposer_slashing(&self, proposer_slashing: SigVerifiedOp<ProposerSlashing>) {
if self.eth1_chain.is_some() {
self.op_pool.insert_proposer_slashing(proposer_slashing)
}
}
/// Accept some attester slashing and queue it for inclusion in an appropriate block.
pub fn process_attester_slashing(
/// Verify an attester slashing before allowing it to propagate on the gossip network.
pub fn verify_attester_slashing_for_gossip(
&self,
attester_slashing: AttesterSlashing<T::EthSpec>,
) -> Result<(), AttesterSlashingValidationError> {
match self.wall_clock_state() {
Ok(state) => {
if self.eth1_chain.is_some() {
self.op_pool
.insert_attester_slashing(attester_slashing, &state, &self.spec)
} else {
Ok(())
}
}
Err(e) => {
error!(
&self.log,
"Unable to process attester slashing";
"error" => format!("{:?}", e),
"reason" => "no state"
);
Ok(())
}
) -> Result<ObservationOutcome<AttesterSlashing<T::EthSpec>>, Error> {
let wall_clock_state = self.wall_clock_state()?;
Ok(self.observed_attester_slashings.verify_and_observe(
attester_slashing,
&wall_clock_state,
&self.spec,
)?)
}
/// Accept some attester slashing and queue it for inclusion in an appropriate block.
pub fn import_attester_slashing(
&self,
attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>>,
) -> Result<(), Error> {
if self.eth1_chain.is_some() {
self.op_pool
.insert_attester_slashing(attester_slashing, self.head_info()?.fork)
}
Ok(())
}
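A sketch of the new two-step handling for gossip operations, using the voluntary-exit methods from this diff. The `ObservationOutcome` variant names below are assumptions for illustration; the diff only shows that gossip verification returns an `ObservationOutcome` wrapping a signature-verified operation.

fn handle_gossip_exit<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    exit: SignedVoluntaryExit,
) -> Result<(), String> {
    match chain
        .verify_voluntary_exit_for_gossip(exit)
        .map_err(|e| format!("exit failed gossip verification: {:?}", e))?
    {
        // A newly-observed, valid exit: safe to re-gossip and queue for a future block.
        ObservationOutcome::New(sig_verified_exit) => chain.import_voluntary_exit(sig_verified_exit),
        // Already observed for this validator: valid, but not re-imported.
        ObservationOutcome::AlreadyKnown => (),
    }
    Ok(())
}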
/// Attempt to verify and import a chain of blocks to `self`.
@ -1333,7 +1337,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
unverified_block: B,
) -> Result<Hash256, BlockError> {
// Start the Prometheus timer.
let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
// Increment the Prometheus counter for block processing requests.
metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
@ -1401,9 +1405,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
};
// Stop the Prometheus timer.
metrics::stop_timer(full_timer);
result
}
@ -1422,6 +1423,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let state = fully_verified_block.state;
let parent_block = fully_verified_block.parent_block;
let intermediate_states = fully_verified_block.intermediate_states;
let current_slot = self.slot()?;
let attestation_observation_timer =
metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
@ -1441,9 +1443,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::stop_timer(attestation_observation_timer);
let fork_choice_register_timer =
metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER);
// If there are new validators in this block, update our pubkey cache.
//
// We perform this _before_ adding the block to fork choice because the pubkey cache is
@ -1479,20 +1478,35 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
shuffling_cache.insert(state.current_epoch(), target_root, committee_cache);
}
let mut fork_choice = self.fork_choice.write();
// Register the new block with the fork choice service.
if let Err(e) = self
.fork_choice
.process_block(self, &state, block, block_root)
{
error!(
self.log,
"Add block to fork choice failed";
"block_root" => format!("{}", block_root),
"error" => format!("{:?}", e),
)
let _fork_choice_block_timer =
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
fork_choice
.on_block(current_slot, block, block_root, &state)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
}
metrics::stop_timer(fork_choice_register_timer);
// Register each attestation in the block with the fork choice service.
for attestation in &block.body.attestations[..] {
let _fork_choice_attestation_timer =
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
let committee =
state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
match fork_choice.on_attestation(current_slot, &indexed_attestation) {
Ok(()) => Ok(()),
// Ignore invalid attestations whilst importing attestations from a block. The
// block might be very old and therefore the attestations useless to fork choice.
Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
Err(e) => Err(BlockError::BeaconChainError(e.into())),
}?;
}
metrics::observe(
&metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
@ -1514,6 +1528,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.store.put_state(&block.state_root, &state)?;
self.store.put_block(&block_root, signed_block.clone())?;
// The fork choice write-lock is dropped *after* the on-disk database has been updated.
// This prevents inconsistency between the two at the expense of concurrency.
drop(fork_choice);
let parent_root = block.parent_root;
let slot = block.slot;
@ -1605,8 +1623,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut graffiti: [u8; 32] = [0; 32];
graffiti.copy_from_slice(GRAFFITI.as_bytes());
let (proposer_slashings, attester_slashings) =
self.op_pool.get_slashings(&state, &self.spec);
let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state);
let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?;
let deposits = eth1_chain
@ -1682,7 +1699,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Execute the fork choice algorithm and enthrone the result as the canonical head.
pub fn fork_choice(&self) -> Result<(), Error> {
metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS);
let overall_timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);
let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);
let result = self.fork_choice_internal();
@ -1690,14 +1707,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS);
}
metrics::stop_timer(overall_timer);
result
}
fn fork_choice_internal(&self) -> Result<(), Error> {
// Determine the root of the block that is the head of the chain.
let beacon_block_root = self.fork_choice.find_head(&self)?;
let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?;
let current_head = self.head_info()?;
let old_finalized_root = current_head.finalized_checkpoint.root;
@ -1877,7 +1892,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
new_epoch: new_finalized_epoch,
})
} else {
self.fork_choice.prune()?;
self.fork_choice.write().prune()?;
self.observed_block_producers
.prune(new_finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()));
@ -1900,7 +1915,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.get_state(&finalized_block.state_root, Some(finalized_block.slot))?
.ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?;
self.op_pool.prune_all(&finalized_state, &self.spec);
self.op_pool
.prune_all(&finalized_state, self.head_info()?.fork);
// TODO: configurable max finality distance
let max_finality_distance = 0;


@ -0,0 +1,352 @@
//! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice`
//! struct.
//!
//! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database
//! reads when fork choice requires the validator balances of the justified state.
use crate::{metrics, BeaconSnapshot};
use fork_choice::ForkChoiceStore;
use ssz_derive::{Decode, Encode};
use std::marker::PhantomData;
use std::sync::Arc;
use store::{Error as StoreError, HotColdDB, ItemStore};
use types::{
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock,
Slot,
};
#[derive(Debug)]
pub enum Error {
UnableToReadSlot,
UnableToReadTime,
InvalidGenesisSnapshot(Slot),
AncestorUnknown { ancestor_slot: Slot },
UninitializedBestJustifiedBalances,
FailedToReadBlock(StoreError),
MissingBlock(Hash256),
FailedToReadState(StoreError),
MissingState(Hash256),
InvalidPersistedBytes(ssz::DecodeError),
BeaconStateError(BeaconStateError),
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Self {
Error::BeaconStateError(e)
}
}
/// The number of validator balance sets that are cached within `BalancesCache`.
const MAX_BALANCE_CACHE_SIZE: usize = 4;
/// Returns the effective balances for every validator in the given `state`.
///
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
state
.validators
.iter()
.map(|validator| {
if validator.is_active_at(state.current_epoch()) {
validator.effective_balance
} else {
0
}
})
.collect()
}
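A self-contained restatement of the rule above with plain types rather than the real `BeaconState`, for readers skimming the diff: a validator contributes its effective balance to fork choice weights only while active.

struct SimpleValidator {
    effective_balance: u64,
    activation_epoch: u64,
    exit_epoch: u64,
}

impl SimpleValidator {
    // Mirrors the spec's `is_active_validator`: active in [activation_epoch, exit_epoch).
    fn is_active_at(&self, epoch: u64) -> bool {
        self.activation_epoch <= epoch && epoch < self.exit_epoch
    }
}

fn effective_balances(validators: &[SimpleValidator], current_epoch: u64) -> Vec<u64> {
    validators
        .iter()
        .map(|v| if v.is_active_at(current_epoch) { v.effective_balance } else { 0 })
        .collect()
}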
/// An item that is stored in the `BalancesCache`.
#[derive(PartialEq, Clone, Debug, Encode, Decode)]
struct CacheItem {
/// The block root at which `self.balances` are valid.
block_root: Hash256,
/// The effective balances from a `BeaconState` validator registry.
balances: Vec<u64>,
}
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
/// checkpoint.
///
/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`.
#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)]
struct BalancesCache {
items: Vec<CacheItem>,
}
impl BalancesCache {
/// Inspect the given `state` and determine the root of the block at the first slot of
/// `state.current_epoch`. If there is not already some entry for the given block root, then
/// add the effective balances from the `state` to the cache.
pub fn process_state<E: EthSpec>(
&mut self,
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<(), Error> {
// We are only interested in balances from states that are at the start of an epoch,
// because this is where the `current_justified_checkpoint.root` will point.
if !Self::is_first_block_in_epoch(block_root, state)? {
return Ok(());
}
let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
let epoch_boundary_root = if epoch_boundary_slot == state.slot {
block_root
} else {
// This call remains sensible as long as `state.block_roots` is larger than a single
// epoch.
*state.get_block_root(epoch_boundary_slot)?
};
if self.position(epoch_boundary_root).is_none() {
let item = CacheItem {
block_root: epoch_boundary_root,
balances: get_effective_balances(state),
};
if self.items.len() == MAX_BALANCE_CACHE_SIZE {
self.items.remove(0);
}
self.items.push(item);
}
Ok(())
}
/// Returns `true` if the given `block_root` is the first/only block to have been processed in
/// the epoch of the given `state`.
///
/// We can determine if it is the first block by looking back through `state.block_roots` to
/// see if there is a block in the current epoch with a different root.
fn is_first_block_in_epoch<E: EthSpec>(
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<bool, Error> {
let mut prior_block_found = false;
for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
if slot < state.slot {
if *state.get_block_root(slot)? != block_root {
prior_block_found = true;
break;
}
} else {
break;
}
}
Ok(!prior_block_found)
}
fn position(&self, block_root: Hash256) -> Option<usize> {
self.items
.iter()
.position(|item| item.block_root == block_root)
}
/// Get the balances for the given `block_root`, if any.
///
/// If some balances are found, they are removed from the cache.
pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
let i = self.position(block_root)?;
Some(self.items.remove(i).balances)
}
}
/// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the
/// `fork_choice::ForkChoice` struct.
#[derive(Debug)]
pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
store: Arc<HotColdDB<E, Hot, Cold>>,
balances_cache: BalancesCache,
time: Slot,
finalized_checkpoint: Checkpoint,
justified_checkpoint: Checkpoint,
justified_balances: Vec<u64>,
best_justified_checkpoint: Checkpoint,
_phantom: PhantomData<E>,
}
impl<E, Hot, Cold> PartialEq for BeaconForkChoiceStore<E, Hot, Cold>
where
E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{
/// This implementation ignores the `store` and `slot_clock`.
fn eq(&self, other: &Self) -> bool {
self.balances_cache == other.balances_cache
&& self.time == other.time
&& self.finalized_checkpoint == other.finalized_checkpoint
&& self.justified_checkpoint == other.justified_checkpoint
&& self.justified_balances == other.justified_balances
&& self.best_justified_checkpoint == other.best_justified_checkpoint
}
}
impl<E, Hot, Cold> BeaconForkChoiceStore<E, Hot, Cold>
where
E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{
/// Initialize `Self` from some `anchor` checkpoint which may or may not be the genesis state.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_forkchoice_store
///
/// ## Notes:
///
/// It is assumed that `anchor` is already persisted in `store`.
pub fn get_forkchoice_store(
store: Arc<HotColdDB<E, Hot, Cold>>,
anchor: &BeaconSnapshot<E>,
) -> Self {
let anchor_state = &anchor.beacon_state;
let mut anchor_block_header = anchor_state.latest_block_header.clone();
if anchor_block_header.state_root == Hash256::zero() {
anchor_block_header.state_root = anchor.beacon_state_root;
}
let anchor_root = anchor_block_header.canonical_root();
let anchor_epoch = anchor_state.current_epoch();
let justified_checkpoint = Checkpoint {
epoch: anchor_epoch,
root: anchor_root,
};
let finalized_checkpoint = justified_checkpoint;
Self {
store,
balances_cache: <_>::default(),
time: anchor_state.slot,
justified_checkpoint,
justified_balances: anchor_state.balances.clone().into(),
finalized_checkpoint,
best_justified_checkpoint: justified_checkpoint,
_phantom: PhantomData,
}
}
/// Save the current state of `Self` to a `PersistedForkChoiceStore` which can be stored to the
/// on-disk database.
pub fn to_persisted(&self) -> PersistedForkChoiceStore {
PersistedForkChoiceStore {
balances_cache: self.balances_cache.clone(),
time: self.time,
finalized_checkpoint: self.finalized_checkpoint,
justified_checkpoint: self.justified_checkpoint,
justified_balances: self.justified_balances.clone(),
best_justified_checkpoint: self.best_justified_checkpoint,
}
}
/// Restore `Self` from a previously-generated `PersistedForkChoiceStore`.
pub fn from_persisted(
persisted: PersistedForkChoiceStore,
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Result<Self, Error> {
Ok(Self {
store,
balances_cache: persisted.balances_cache,
time: persisted.time,
finalized_checkpoint: persisted.finalized_checkpoint,
justified_checkpoint: persisted.justified_checkpoint,
justified_balances: persisted.justified_balances,
best_justified_checkpoint: persisted.best_justified_checkpoint,
_phantom: PhantomData,
})
}
}
impl<E, Hot, Cold> ForkChoiceStore<E> for BeaconForkChoiceStore<E, Hot, Cold>
where
E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{
type Error = Error;
fn get_current_slot(&self) -> Slot {
self.time
}
fn set_current_slot(&mut self, slot: Slot) {
self.time = slot
}
fn on_verified_block(
&mut self,
_block: &BeaconBlock<E>,
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<(), Self::Error> {
self.balances_cache.process_state(block_root, state)
}
fn justified_checkpoint(&self) -> &Checkpoint {
&self.justified_checkpoint
}
fn justified_balances(&self) -> &[u64] {
&self.justified_balances
}
fn best_justified_checkpoint(&self) -> &Checkpoint {
&self.best_justified_checkpoint
}
fn finalized_checkpoint(&self) -> &Checkpoint {
&self.finalized_checkpoint
}
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
self.finalized_checkpoint = checkpoint
}
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Error> {
self.justified_checkpoint = checkpoint;
if let Some(balances) = self.balances_cache.get(self.justified_checkpoint.root) {
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
self.justified_balances = balances;
} else {
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let justified_block = self
.store
.get_item::<SignedBeaconBlock<E>>(&self.justified_checkpoint.root)
.map_err(Error::FailedToReadBlock)?
.ok_or_else(|| Error::MissingBlock(self.justified_checkpoint.root))?
.message;
self.justified_balances = self
.store
.get_state(&justified_block.state_root, Some(justified_block.slot))
.map_err(Error::FailedToReadState)?
.ok_or_else(|| Error::MissingState(justified_block.state_root))?
.balances
.into();
}
Ok(())
}
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
self.best_justified_checkpoint = checkpoint
}
}
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
#[derive(Encode, Decode)]
pub struct PersistedForkChoiceStore {
balances_cache: BalancesCache,
time: Slot,
finalized_checkpoint: Checkpoint,
justified_checkpoint: Checkpoint,
justified_balances: Vec<u64>,
best_justified_checkpoint: Checkpoint,
}
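Not part of the diff: a self-contained illustration of the SSZ round-trip that `#[derive(Encode, Decode)]` gives `PersistedForkChoiceStore`, using the `eth2_ssz`/`eth2_ssz_derive` crates already present in this PR's Cargo.lock. The toy struct and field values are made up.

use ssz::{Decode as _, Encode as _};
use ssz_derive::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct ToyStore {
    time: u64,
    justified_balances: Vec<u64>,
}

fn main() {
    let original = ToyStore {
        time: 42,
        justified_balances: vec![32_000_000_000, 0],
    };
    // Encode to bytes suitable for the on-disk database, then restore and compare.
    let bytes = original.as_ssz_bytes();
    let restored = ToyStore::from_ssz_bytes(&bytes).expect("round-trips");
    assert_eq!(original, restored);
}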


@ -530,7 +530,11 @@ impl<T: BeaconChainTypes> FullyVerifiedBlock<T> {
// because it will revert finalization. Note that the finalized block is stored in fork
// choice, so we will not reject any child of the finalized block (this is relevant during
// genesis).
if !chain.fork_choice.contains_block(&block.parent_root()) {
if !chain
.fork_choice
.read()
.contains_block(&block.parent_root())
{
return Err(BlockError::ParentUnknown(block.parent_root()));
}
@ -727,7 +731,7 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
// Check if the block is already known. We know it is post-finalization, so it is
// sufficient to check the fork choice.
if chain.fork_choice.contains_block(&block_root) {
if chain.fork_choice.read().contains_block(&block_root) {
return Err(BlockError::BlockIsAlreadyKnown);
}
@ -767,7 +771,7 @@ fn load_parent<T: BeaconChainTypes>(
// because it will revert finalization. Note that the finalized block is stored in fork
// choice, so we will not reject any child of the finalized block (this is relevant during
// genesis).
if !chain.fork_choice.contains_block(&block.parent_root) {
if !chain.fork_choice.read().contains_block(&block.parent_root) {
return Err(BlockError::ParentUnknown(block.parent_root));
}


@ -3,21 +3,22 @@ use crate::beacon_chain::{
};
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
use crate::events::NullEventHandler;
use crate::fork_choice::SszForkChoice;
use crate::head_tracker::HeadTracker;
use crate::migrate::Migrate;
use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::persisted_fork_choice::PersistedForkChoice;
use crate::shuffling_cache::ShufflingCache;
use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::{
BeaconChain, BeaconChainTypes, BeaconSnapshot, Eth1Chain, Eth1ChainBackend, EventHandler,
ForkChoice,
BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain,
Eth1ChainBackend, EventHandler,
};
use eth1::Config as Eth1Config;
use fork_choice::ForkChoice;
use operation_pool::{OperationPool, PersistedOperationPool};
use proto_array_fork_choice::ProtoArrayForkChoice;
use parking_lot::RwLock;
use slog::{info, Logger};
use slot_clock::{SlotClock, TestingSlotClock};
use std::marker::PhantomData;
@ -99,7 +100,6 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
pub finalized_snapshot: Option<BeaconSnapshot<T::EthSpec>>,
genesis_block_root: Option<Hash256>,
op_pool: Option<OperationPool<T::EthSpec>>,
fork_choice: Option<ForkChoice<T>>,
eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
event_handler: Option<T::EventHandler>,
slot_clock: Option<T::SlotClock>,
@ -145,7 +145,6 @@ where
finalized_snapshot: None,
genesis_block_root: None,
op_pool: None,
fork_choice: None,
eth1_chain: None,
event_handler: None,
slot_clock: None,
@ -284,8 +283,8 @@ where
store
.get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
.map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
.map(|persisted| persisted.into_operation_pool(&head_state, &self.spec))
.unwrap_or_else(|| OperationPool::new()),
.map(PersistedOperationPool::into_operation_pool)
.unwrap_or_else(OperationPool::new),
);
let finalized_block_root = head_state.finalized_checkpoint.root;
@ -423,6 +422,13 @@ where
let log = self
.log
.ok_or_else(|| "Cannot build without a logger".to_string())?;
let slot_clock = self
.slot_clock
.ok_or_else(|| "Cannot build without a slot_clock.".to_string())?;
let store = self
.store
.clone()
.ok_or_else(|| "Cannot build without a store.".to_string())?;
// If this beacon chain is being loaded from disk, use the stored head. Otherwise, just use
// the finalized checkpoint (which is probably genesis).
@ -454,17 +460,33 @@ where
.map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e))
})?;
let persisted_fork_choice = store
.get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;
let fork_choice = if let Some(persisted) = persisted_fork_choice {
let fc_store =
BeaconForkChoiceStore::from_persisted(persisted.fork_choice_store, store.clone())
.map_err(|e| format!("Unable to load ForkChoiceStore: {:?}", e))?;
ForkChoice::from_persisted(persisted.fork_choice, fc_store)
.map_err(|e| format!("Unable to parse persisted fork choice from disk: {:?}", e))?
} else {
let genesis = &canonical_head;
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), genesis);
ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?
};
let beacon_chain = BeaconChain {
spec: self.spec,
store: self
.store
.ok_or_else(|| "Cannot build without store".to_string())?,
store,
store_migrator: self
.store_migrator
.ok_or_else(|| "Cannot build without store migrator".to_string())?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build without slot clock".to_string())?,
slot_clock,
op_pool: self
.op_pool
.ok_or_else(|| "Cannot build without op pool".to_string())?,
@ -478,15 +500,17 @@ where
observed_aggregators: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_block_producers: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_voluntary_exits: <_>::default(),
observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(),
eth1_chain: self.eth1_chain,
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
canonical_head: TimeoutRwLock::new(canonical_head.clone()),
genesis_block_root: self
.genesis_block_root
.ok_or_else(|| "Cannot build without a genesis block root".to_string())?,
fork_choice: self
.fork_choice
.ok_or_else(|| "Cannot build without a fork choice".to_string())?,
fork_choice: RwLock::new(fork_choice),
event_handler: self
.event_handler
.ok_or_else(|| "Cannot build without an event handler".to_string())?,
@ -517,78 +541,6 @@ where
}
}
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder<
Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>
where
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{
/// Initializes a fork choice with the `ThreadSafeReducedTree` backend.
///
/// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to
/// the database. Otherwise, create a new, empty fork choice.
pub fn reduced_tree_fork_choice(mut self) -> Result<Self, String> {
let store = self
.store
.clone()
.ok_or_else(|| "reduced_tree_fork_choice requires a store.".to_string())?;
let persisted_fork_choice = store
.get_item::<SszForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
.map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;
let fork_choice = if let Some(persisted) = persisted_fork_choice {
ForkChoice::from_ssz_container(persisted)
.map_err(|e| format!("Unable to read persisted fork choice from disk: {:?}", e))?
} else {
let finalized_snapshot = &self
.finalized_snapshot
.as_ref()
.ok_or_else(|| "reduced_tree_fork_choice requires a finalized_snapshot")?;
let genesis_block_root = self
.genesis_block_root
.ok_or_else(|| "reduced_tree_fork_choice requires a genesis_block_root")?;
let backend = ProtoArrayForkChoice::new(
finalized_snapshot.beacon_block.message.slot,
finalized_snapshot.beacon_block.message.state_root,
// Note: here we set the `justified_epoch` to be the same as the epoch of the
// finalized checkpoint. Whilst this finalized checkpoint may actually point to
// a _later_ justified checkpoint, that checkpoint won't yet exist in the fork
// choice.
finalized_snapshot.beacon_state.current_epoch(),
finalized_snapshot.beacon_state.current_epoch(),
finalized_snapshot.beacon_block_root,
)?;
ForkChoice::new(
backend,
genesis_block_root,
&finalized_snapshot.beacon_state,
)
};
self.fork_choice = Some(fork_choice);
Ok(self)
}
}
impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder<
Witness<
@ -621,7 +573,8 @@ where
.as_ref()
.ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?;
let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone());
let backend =
CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone());
let mut eth1_chain = Eth1Chain::new(backend);
eth1_chain.use_dummy_backend = true;
@ -772,8 +725,6 @@ mod test {
.null_event_handler()
.testing_slot_clock(Duration::from_secs(1))
.expect("should configure testing slot clock")
.reduced_tree_fork_choice()
.expect("should add fork choice to builder")
.build()
.expect("should build");

View File

@ -1,5 +1,5 @@
use crate::beacon_chain::ForkChoiceError;
use crate::eth1_chain::Error as Eth1ChainError;
use crate::fork_choice::Error as ForkChoiceError;
use crate::naive_aggregation_pool::Error as NaiveAggregationError;
use crate::observed_attestations::Error as ObservedAttestationsError;
use crate::observed_attesters::Error as ObservedAttestersError;
@ -10,8 +10,12 @@ use ssz::DecodeError;
use ssz_types::Error as SszTypesError;
use state_processing::{
block_signature_verifier::Error as BlockSignatureVerifierError,
per_block_processing::errors::AttestationValidationError,
signature_sets::Error as SignatureSetError, BlockProcessingError, SlotProcessingError,
per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
ProposerSlashingValidationError,
},
signature_sets::Error as SignatureSetError,
BlockProcessingError, SlotProcessingError,
};
use std::time::Duration;
use types::*;
@ -50,6 +54,9 @@ pub enum BeaconChainError {
},
CannotAttestToFutureState,
AttestationValidationError(AttestationValidationError),
ExitValidationError(ExitValidationError),
ProposerSlashingValidationError(ProposerSlashingValidationError),
AttesterSlashingValidationError(AttesterSlashingValidationError),
StateSkipTooLarge {
start_slot: Slot,
requested_slot: Slot,
@ -78,6 +85,9 @@ pub enum BeaconChainError {
easy_from_to!(SlotProcessingError, BeaconChainError);
easy_from_to!(AttestationValidationError, BeaconChainError);
easy_from_to!(ExitValidationError, BeaconChainError);
easy_from_to!(ProposerSlashingValidationError, BeaconChainError);
easy_from_to!(AttesterSlashingValidationError, BeaconChainError);
easy_from_to!(SszTypesError, BeaconChainError);
easy_from_to!(OpPoolError, BeaconChainError);
easy_from_to!(NaiveAggregationError, BeaconChainError);
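The `easy_from_to!` macro's definition is not shown in this diff, but given the enum variants added above it presumably expands to a plain `From` conversion, roughly like this sketch (assumed expansion, not the actual macro output):
// Assumed expansion of `easy_from_to!(ExitValidationError, BeaconChainError)`:
impl From<ExitValidationError> for BeaconChainError {
    fn from(e: ExitValidationError) -> BeaconChainError {
        BeaconChainError::ExitValidationError(e)
    }
}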

View File

@ -143,9 +143,10 @@ where
ssz_container: &SszEth1,
config: Eth1Config,
log: &Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let backend =
Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone())?;
Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?;
Ok(Self {
use_dummy_backend: ssz_container.use_dummy_backend,
backend,
@ -191,7 +192,12 @@ pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
fn as_bytes(&self) -> Vec<u8>;
/// Create a `Eth1ChainBackend` instance given encoded bytes.
fn from_bytes(bytes: &[u8], config: Eth1Config, log: Logger) -> Result<Self, String>;
fn from_bytes(
bytes: &[u8],
config: Eth1Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String>;
}
/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data.
@ -234,7 +240,12 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
}
/// Create dummy eth1 backend.
fn from_bytes(_bytes: &[u8], _config: Eth1Config, _log: Logger) -> Result<Self, String> {
fn from_bytes(
_bytes: &[u8],
_config: Eth1Config,
_log: Logger,
_spec: ChainSpec,
) -> Result<Self, String> {
Ok(Self(PhantomData))
}
}
@ -261,9 +272,9 @@ impl<T: EthSpec> CachingEth1Backend<T> {
/// Instantiates `self` with empty caches.
///
/// Does not connect to the eth1 node or start any tasks to keep the cache updated.
pub fn new(config: Eth1Config, log: Logger) -> Self {
pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
Self {
core: HttpService::new(config, log.clone()),
core: HttpService::new(config, log.clone(), spec),
log,
_phantom: PhantomData,
}
@ -389,8 +400,13 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
}
/// Recover the cached backend from encoded bytes.
fn from_bytes(bytes: &[u8], config: Eth1Config, log: Logger) -> Result<Self, String> {
let inner = HttpService::from_bytes(bytes, config, log.clone())?;
fn from_bytes(
bytes: &[u8],
config: Eth1Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?;
Ok(Self {
core: inner,
log,
@ -549,7 +565,10 @@ mod test {
mod eth1_chain_json_backend {
use super::*;
use eth1::DepositLog;
use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder};
use types::{
test_utils::{generate_deterministic_keypair, TestingDepositBuilder},
EthSpec, MainnetEthSpec,
};
fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
let eth1_config = Eth1Config {
@ -557,7 +576,11 @@ mod test {
};
let log = null_logger().unwrap();
Eth1Chain::new(CachingEth1Backend::new(eth1_config, log))
Eth1Chain::new(CachingEth1Backend::new(
eth1_config,
log,
MainnetEthSpec::default_spec(),
))
}
fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {
@ -571,6 +594,7 @@ mod test {
deposit_data,
block_number: i,
index: i,
signature_is_valid: true,
}
}

View File

@ -1,300 +0,0 @@
mod checkpoint_manager;
use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes};
use checkpoint_manager::{get_effective_balances, CheckpointManager, CheckpointWithBalances};
use parking_lot::{RwLock, RwLockReadGuard};
use proto_array_fork_choice::{core::ProtoArray, ProtoArrayForkChoice};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::common::get_indexed_attestation;
use std::marker::PhantomData;
use store::{DBColumn, Error as StoreError, StoreItem};
use types::{BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256, IndexedAttestation, Slot};
type Result<T> = std::result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
MissingBlock(Hash256),
MissingState(Hash256),
BackendError(String),
BeaconStateError(BeaconStateError),
StoreError(StoreError),
BeaconChainError(Box<BeaconChainError>),
UnknownBlockSlot(Hash256),
UnknownJustifiedBlock(Hash256),
UnknownJustifiedState(Hash256),
UnableToJsonEncode(String),
InvalidAttestation,
}
pub struct ForkChoice<T: BeaconChainTypes> {
backend: ProtoArrayForkChoice,
/// Used for resolving the `0x00..00` alias back to genesis.
///
/// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root
/// whenever the struct was instantiated.
genesis_block_root: Hash256,
checkpoint_manager: RwLock<CheckpointManager>,
_phantom: PhantomData<T>,
}
impl<T: BeaconChainTypes> PartialEq for ForkChoice<T> {
/// This implementation ignores the `store`.
fn eq(&self, other: &Self) -> bool {
self.backend == other.backend
&& self.genesis_block_root == other.genesis_block_root
&& *self.checkpoint_manager.read() == *other.checkpoint_manager.read()
}
}
impl<T: BeaconChainTypes> ForkChoice<T> {
/// Instantiate a new fork chooser.
///
/// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized
/// block.
pub fn new(
backend: ProtoArrayForkChoice,
genesis_block_root: Hash256,
genesis_state: &BeaconState<T::EthSpec>,
) -> Self {
let genesis_checkpoint = CheckpointWithBalances {
epoch: genesis_state.current_epoch(),
root: genesis_block_root,
balances: get_effective_balances(genesis_state),
};
Self {
backend,
genesis_block_root,
checkpoint_manager: RwLock::new(CheckpointManager::new(genesis_checkpoint)),
_phantom: PhantomData,
}
}
/// Run the fork choice rule to determine the head.
pub fn find_head(&self, chain: &BeaconChain<T>) -> Result<Hash256> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES);
let remove_alias = |root| {
if root == Hash256::zero() {
self.genesis_block_root
} else {
root
}
};
let mut manager = self.checkpoint_manager.write();
manager.maybe_update(chain.slot()?, chain)?;
let result = self
.backend
.find_head(
manager.current.justified.epoch,
remove_alias(manager.current.justified.root),
manager.current.finalized.epoch,
&manager.current.justified.balances,
)
.map_err(Into::into);
metrics::stop_timer(timer);
result
}
/// Returns true if the given block is known to fork choice.
pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.backend.contains_block(block_root)
}
/// Returns the state root for the given block root.
pub fn block_slot_and_state_root(&self, block_root: &Hash256) -> Option<(Slot, Hash256)> {
self.backend.block_slot_and_state_root(block_root)
}
/// Process all attestations in the given `block`.
///
/// Assumes the block (and therefore its attestations) are valid. It is a logic error to
/// provide an invalid block.
pub fn process_block(
&self,
chain: &BeaconChain<T>,
state: &BeaconState<T::EthSpec>,
block: &BeaconBlock<T::EthSpec>,
block_root: Hash256,
) -> Result<()> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
self.checkpoint_manager
.write()
.process_state(block_root, state, chain, &self.backend)?;
self.checkpoint_manager
.write()
.maybe_update(chain.slot()?, chain)?;
// Note: we never count the block as a latest message, only attestations.
for attestation in &block.body.attestations {
// If the `data.beacon_block_root` block is not known to the fork choice, simply ignore
// the vote.
if self
.backend
.contains_block(&attestation.data.beacon_block_root)
{
let committee =
state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
let indexed_attestation =
get_indexed_attestation(committee.committee, &attestation)
.map_err(|_| Error::InvalidAttestation)?;
self.process_indexed_attestation(&indexed_attestation)?;
}
}
// This does not apply a vote to the block, it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes.
self.backend.process_block(
block.slot,
block_root,
block.parent_root,
block.state_root,
state.current_justified_checkpoint.epoch,
state.finalized_checkpoint.epoch,
)?;
metrics::stop_timer(timer);
Ok(())
}
/// Process an attestation which references `block` in `attestation.data.beacon_block_root`.
///
/// Assumes the attestation is valid.
pub fn process_indexed_attestation(
&self,
attestation: &IndexedAttestation<T::EthSpec>,
) -> Result<()> {
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
let block_hash = attestation.data.beacon_block_root;
// Ignore any attestations to the zero hash.
//
// This is an edge case that results from the spec aliasing the zero hash to the genesis
// block. Attesters may attest to the zero hash if they have never seen a block.
//
// We have two options here:
//
// 1. Apply all zero-hash attestations to the zero hash.
// 2. Ignore all attestations to the zero hash.
//
// (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is
// fine because votes to the genesis block are not useful; all validators implicitly attest
// to genesis just by being present in the chain.
//
// Additionally, don't add any block hash to fork choice unless we have imported the block.
if block_hash != Hash256::zero() {
for validator_index in attestation.attesting_indices.iter() {
self.backend.process_attestation(
*validator_index as usize,
block_hash,
attestation.data.target.epoch,
)?;
}
}
metrics::stop_timer(timer);
Ok(())
}
/// Returns the latest message for a given validator, if any.
///
/// Returns `(block_root, block_slot)`.
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
self.backend.latest_message(validator_index)
}
/// Trigger a prune on the underlying fork choice backend.
pub fn prune(&self) -> Result<()> {
let finalized_root = self.checkpoint_manager.read().current.finalized.root;
self.backend.maybe_prune(finalized_root).map_err(Into::into)
}
/// Returns a read-lock to the core `ProtoArray` struct.
///
/// Should only be used when encoding/decoding during troubleshooting.
pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
self.backend.core_proto_array()
}
/// Returns a `SszForkChoice` which contains the current state of `Self`.
pub fn as_ssz_container(&self) -> SszForkChoice {
SszForkChoice {
genesis_block_root: self.genesis_block_root.clone(),
checkpoint_manager: self.checkpoint_manager.read().clone(),
backend_bytes: self.backend.as_bytes(),
}
}
/// Instantiates `Self` from a prior `SszForkChoice`.
///
/// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`.
pub fn from_ssz_container(ssz_container: SszForkChoice) -> Result<Self> {
let backend = ProtoArrayForkChoice::from_bytes(&ssz_container.backend_bytes)?;
Ok(Self {
backend,
genesis_block_root: ssz_container.genesis_block_root,
checkpoint_manager: RwLock::new(ssz_container.checkpoint_manager),
_phantom: PhantomData,
})
}
}
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
///
/// This is used when persisting the state of the `BeaconChain` to disk.
#[derive(Encode, Decode, Clone)]
pub struct SszForkChoice {
genesis_block_root: Hash256,
checkpoint_manager: CheckpointManager,
backend_bytes: Vec<u8>,
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<BeaconChainError> for Error {
fn from(e: BeaconChainError) -> Error {
Error::BeaconChainError(Box::new(e))
}
}
impl From<StoreError> for Error {
fn from(e: StoreError) -> Error {
Error::StoreError(e)
}
}
impl From<String> for Error {
fn from(e: String) -> Error {
Error::BackendError(e)
}
}
impl StoreItem for SszForkChoice {
fn db_column() -> DBColumn {
DBColumn::ForkChoice
}
fn as_store_bytes(&self) -> Vec<u8> {
self.as_ssz_bytes()
}
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, StoreError> {
Self::from_ssz_bytes(bytes).map_err(Into::into)
}
}

View File

@ -1,340 +0,0 @@
use super::Error;
use crate::{metrics, BeaconChain, BeaconChainTypes};
use proto_array_fork_choice::ProtoArrayForkChoice;
use ssz_derive::{Decode, Encode};
use types::{BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot};
const MAX_BALANCE_CACHE_SIZE: usize = 4;
/// An item that is stored in the `BalancesCache`.
#[derive(PartialEq, Clone, Encode, Decode)]
struct CacheItem {
/// The block root at which `self.balances` are valid.
block_root: Hash256,
/// The `state.balances` list.
balances: Vec<u64>,
}
/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified
/// checkpoint.
///
/// It should store a mapping of `epoch_boundary_block_root -> state.balances`.
#[derive(PartialEq, Clone, Default, Encode, Decode)]
struct BalancesCache {
items: Vec<CacheItem>,
}
impl BalancesCache {
/// Inspect the given `state` and determine the root of the block at the first slot of
/// `state.current_epoch`. If there is not already some entry for the given block root, then
/// add `state.balances` to the cache.
pub fn process_state<E: EthSpec>(
&mut self,
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<(), Error> {
// We are only interested in balances from states that are at the start of an epoch,
// because this is where the `current_justified_checkpoint.root` will point.
if !Self::is_first_block_in_epoch(block_root, state)? {
return Ok(());
}
let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch());
let epoch_boundary_root = if epoch_boundary_slot == state.slot {
block_root
} else {
// This call remains sensible as long as `state.block_roots` is larger than a single
// epoch.
*state.get_block_root(epoch_boundary_slot)?
};
if self.position(epoch_boundary_root).is_none() {
let item = CacheItem {
block_root: epoch_boundary_root,
balances: get_effective_balances(state),
};
if self.items.len() == MAX_BALANCE_CACHE_SIZE {
self.items.remove(0);
}
self.items.push(item);
}
Ok(())
}
/// Returns `true` if the given `block_root` is the first/only block to have been processed in
/// the epoch of the given `state`.
///
/// We can determine if it is the first block by looking back through `state.block_roots` to
/// see if there is a block in the current epoch with a different root.
fn is_first_block_in_epoch<E: EthSpec>(
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<bool, Error> {
let mut prior_block_found = false;
for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) {
if slot < state.slot {
if *state.get_block_root(slot)? != block_root {
prior_block_found = true;
break;
}
} else {
break;
}
}
Ok(!prior_block_found)
}
fn position(&self, block_root: Hash256) -> Option<usize> {
self.items
.iter()
.position(|item| item.block_root == block_root)
}
/// Get the balances for the given `block_root`, if any.
///
/// If some balances are found, they are removed from the cache.
pub fn get(&mut self, block_root: Hash256) -> Option<Vec<u64>> {
let i = self.position(block_root)?;
Some(self.items.remove(i).balances)
}
}
/// Returns the effective balances for every validator in the given `state`.
///
/// Any validator who is not active in the epoch of the given `state` is assigned a balance of
/// zero.
pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
state
.validators
.iter()
.map(|validator| {
if validator.is_active_at(state.current_epoch()) {
validator.effective_balance
} else {
0
}
})
.collect()
}
/// A `types::Checkpoint` that also stores the validator balances from a `BeaconState`.
///
/// Useful because we need to track the justified checkpoint balances.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointWithBalances {
pub epoch: Epoch,
pub root: Hash256,
/// These are the balances of the state with `self.root`.
///
/// Importantly, these are _not_ the balances of the first state that we saw that has
/// `self.epoch` and `self.root` as `state.current_justified_checkpoint`. These are the
/// balances of the state from the block with `state.current_justified_checkpoint.root`.
pub balances: Vec<u64>,
}
impl Into<Checkpoint> for CheckpointWithBalances {
fn into(self) -> Checkpoint {
Checkpoint {
epoch: self.epoch,
root: self.root,
}
}
}
/// A pair of checkpoints, representing `state.current_justified_checkpoint` and
/// `state.finalized_checkpoint` for some `BeaconState`.
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct FFGCheckpoints {
pub justified: CheckpointWithBalances,
pub finalized: Checkpoint,
}
/// A struct to manage the justified and finalized checkpoints to be used for `ForkChoice`.
///
/// This struct exists to manage the `should_update_justified_checkpoint` logic in the fork choice
/// section of the spec:
///
/// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#should_update_justified_checkpoint
#[derive(PartialEq, Clone, Encode, Decode)]
pub struct CheckpointManager {
/// The current FFG checkpoints that should be used for finding the head.
pub current: FFGCheckpoints,
/// The best-known checkpoints that should be moved to `self.current` when the time is right.
best: FFGCheckpoints,
/// The epoch at which `self.current` should become `self.best`, if any.
update_at: Option<Epoch>,
/// A cache used to try to avoid DB reads when updating `self.current` and `self.best`.
balances_cache: BalancesCache,
}
impl CheckpointManager {
/// Create a new checkpoint cache from `genesis_checkpoint` derived from the genesis block.
pub fn new(genesis_checkpoint: CheckpointWithBalances) -> Self {
let ffg_checkpoint = FFGCheckpoints {
justified: genesis_checkpoint.clone(),
finalized: genesis_checkpoint.into(),
};
Self {
current: ffg_checkpoint.clone(),
best: ffg_checkpoint,
update_at: None,
balances_cache: BalancesCache::default(),
}
}
/// Potentially updates `self.current`, if the conditions are correct.
///
/// Should be called before running the fork choice `find_head` function to ensure
/// `self.current` is up-to-date.
pub fn maybe_update<T: BeaconChainTypes>(
&mut self,
current_slot: Slot,
chain: &BeaconChain<T>,
) -> Result<(), Error> {
if self.best.justified.epoch > self.current.justified.epoch {
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
match self.update_at {
None => {
if self.best.justified.epoch > self.current.justified.epoch {
if Self::compute_slots_since_epoch_start::<T>(current_slot)
< chain.spec.safe_slots_to_update_justified
{
self.current = self.best.clone();
} else {
self.update_at = Some(current_epoch + 1)
}
}
}
Some(epoch) if epoch <= current_epoch => {
self.current = self.best.clone();
self.update_at = None
}
_ => {}
}
}
Ok(())
}
/// Checks the given `state` (must correspond to the given `block_root`) to see if it contains
/// a `current_justified_checkpoint` that is better than `self.best_justified_checkpoint`. If
/// so, the value is updated.
///
/// Note: this does not update `self.justified_checkpoint`.
pub fn process_state<T: BeaconChainTypes>(
&mut self,
block_root: Hash256,
state: &BeaconState<T::EthSpec>,
chain: &BeaconChain<T>,
proto_array: &ProtoArrayForkChoice,
) -> Result<(), Error> {
// Only proceed if the new checkpoint is better than our current checkpoint.
if state.current_justified_checkpoint.epoch > self.current.justified.epoch
&& state.finalized_checkpoint.epoch >= self.current.finalized.epoch
{
let candidate = FFGCheckpoints {
justified: CheckpointWithBalances {
epoch: state.current_justified_checkpoint.epoch,
root: state.current_justified_checkpoint.root,
balances: self
.get_balances_for_block(state.current_justified_checkpoint.root, chain)?,
},
finalized: state.finalized_checkpoint.clone(),
};
// Using the given `state`, determine its ancestor at the slot of our current justified
// epoch. Later, this will be compared to the root of the current justified checkpoint
// to determine if this state is descendant of our current justified state.
let new_checkpoint_ancestor = Self::get_block_root_at_slot(
state,
chain,
candidate.justified.root,
self.current
.justified
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
)?;
let candidate_justified_block_slot = proto_array
.block_slot(&candidate.justified.root)
.ok_or_else(|| Error::UnknownBlockSlot(candidate.justified.root))?;
// If the new justified checkpoint is an ancestor of the current justified checkpoint,
// it is always safe to change it.
if new_checkpoint_ancestor == Some(self.current.justified.root)
&& candidate_justified_block_slot
>= candidate
.justified
.epoch
.start_slot(T::EthSpec::slots_per_epoch())
{
self.current = candidate.clone()
}
if candidate.justified.epoch > self.best.justified.epoch {
// Always update the best checkpoint, if it's better.
self.best = candidate;
}
// Add the state's balances to the balances cache to avoid a state read later.
self.balances_cache.process_state(block_root, state)?;
}
Ok(())
}
fn get_balances_for_block<T: BeaconChainTypes>(
&mut self,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Vec<u64>, Error> {
if let Some(balances) = self.balances_cache.get(block_root) {
metrics::inc_counter(&metrics::BALANCES_CACHE_HITS);
Ok(balances)
} else {
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let block = chain
.get_block(&block_root)?
.ok_or_else(|| Error::UnknownJustifiedBlock(block_root))?;
let state = chain
.get_state(&block.state_root(), Some(block.slot()))?
.ok_or_else(|| Error::UnknownJustifiedState(block.state_root()))?;
Ok(get_effective_balances(&state))
}
}
/// Attempts to get the block root for the given `slot`.
///
/// First, the `state` is used to see if the slot is within the distance of its historical
/// lists. Then, the `chain` is used which will anchor the search at the given
/// `justified_root`.
fn get_block_root_at_slot<T: BeaconChainTypes>(
state: &BeaconState<T::EthSpec>,
chain: &BeaconChain<T>,
justified_root: Hash256,
slot: Slot,
) -> Result<Option<Hash256>, Error> {
match state.get_block_root(slot) {
Ok(root) => Ok(Some(*root)),
Err(_) => chain
.get_ancestor_block_root(justified_root, slot)
.map_err(Into::into),
}
}
/// Calculate how far `slot` lies from the start of its epoch.
fn compute_slots_since_epoch_start<T: BeaconChainTypes>(slot: Slot) -> u64 {
let slots_per_epoch = T::EthSpec::slots_per_epoch();
(slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64()
}
}

View File

@ -4,13 +4,13 @@ extern crate lazy_static;
pub mod attestation_verification;
mod beacon_chain;
mod beacon_fork_choice_store;
mod beacon_snapshot;
mod block_verification;
pub mod builder;
mod errors;
pub mod eth1_chain;
pub mod events;
mod fork_choice;
mod head_tracker;
mod metrics;
pub mod migrate;
@ -18,7 +18,9 @@ mod naive_aggregation_pool;
mod observed_attestations;
mod observed_attesters;
mod observed_block_producers;
pub mod observed_operations;
mod persisted_beacon_chain;
mod persisted_fork_choice;
mod shuffling_cache;
mod snapshot_cache;
pub mod test_utils;
@ -27,15 +29,15 @@ mod validator_pubkey_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, ChainSegmentResult,
StateSkipConfig,
ForkChoiceError, StateSkipConfig,
};
pub use self::beacon_snapshot::BeaconSnapshot;
pub use self::errors::{BeaconChainError, BlockProductionError};
pub use attestation_verification::Error as AttestationError;
pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
pub use block_verification::{BlockError, BlockProcessingOutcome, GossipVerifiedBlock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::EventHandler;
pub use fork_choice::ForkChoice;
pub use metrics::scrape_for_metrics;
pub use parking_lot;
pub use slot_clock;

View File

@ -49,10 +49,6 @@ lazy_static! {
"beacon_block_processing_db_write_seconds",
"Time spent writing a newly processed block and state to DB"
);
pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result<Histogram> = try_create_histogram(
"beacon_block_processing_fork_choice_register_seconds",
"Time spent registering the new block with fork choice (but not finding head)"
);
pub static ref BLOCK_PROCESSING_ATTESTATION_OBSERVATION: Result<Histogram> = try_create_histogram(
"beacon_block_processing_attestation_observation_seconds",
"Time spent hashing and remembering all the attestations in the block"
@ -115,10 +111,6 @@ lazy_static! {
/*
* General Attestation Processing
*/
pub static ref ATTESTATION_PROCESSING_APPLY_TO_FORK_CHOICE: Result<Histogram> = try_create_histogram(
"beacon_attestation_processing_apply_to_fork_choice",
"Time spent applying an attestation to fork choice"
);
pub static ref ATTESTATION_PROCESSING_APPLY_TO_AGG_POOL: Result<Histogram> = try_create_histogram(
"beacon_attestation_processing_apply_to_agg_pool",
"Time spent applying an attestation to the naive aggregation pool"

View File

@ -0,0 +1,104 @@
use derivative::Derivative;
use parking_lot::Mutex;
use smallvec::SmallVec;
use state_processing::{SigVerifiedOp, VerifyOperation};
use std::collections::HashSet;
use std::iter::FromIterator;
use std::marker::PhantomData;
use types::{
AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit,
};
/// Number of validator indices to store on the stack in `observed_validators`.
pub const SMALL_VEC_SIZE: usize = 8;
/// Stateful tracker for exit/slashing operations seen on the network.
///
/// Implements the conditions for gossip verification of exits and slashings from the P2P spec.
#[derive(Debug, Derivative)]
#[derivative(Default(bound = "T: ObservableOperation<E>, E: EthSpec"))]
pub struct ObservedOperations<T: ObservableOperation<E>, E: EthSpec> {
/// Indices of validators for whom we have already seen an instance of an operation `T`.
///
/// For voluntary exits, this is the set of all `signed_voluntary_exit.message.validator_index`.
/// For proposer slashings, this is the set of all `proposer_slashing.index`.
/// For attester slashings, this is the set of all validators who would be slashed by
/// previously seen attester slashings, i.e. those validators in the intersection of
/// `attestation_1.attesting_indices` and `attestation_2.attesting_indices`.
observed_validator_indices: Mutex<HashSet<u64>>,
_phantom: PhantomData<(T, E)>,
}
/// Was the observed operation new and valid for further processing, or a useless duplicate?
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ObservationOutcome<T> {
New(SigVerifiedOp<T>),
AlreadyKnown,
}
/// Trait for exits and slashings which can be observed using `ObservedOperations`.
pub trait ObservableOperation<E: EthSpec>: VerifyOperation<E> + Sized {
/// The set of validator indices involved in this operation.
///
/// See the comment on `observed_validator_indices` above for detail.
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]>;
}
impl<E: EthSpec> ObservableOperation<E> for SignedVoluntaryExit {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
std::iter::once(self.message.validator_index).collect()
}
}
impl<E: EthSpec> ObservableOperation<E> for ProposerSlashing {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
std::iter::once(self.signed_header_1.message.proposer_index).collect()
}
}
impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
let attestation_1_indices =
HashSet::<u64>::from_iter(self.attestation_1.attesting_indices.iter().copied());
let attestation_2_indices =
HashSet::<u64>::from_iter(self.attestation_2.attesting_indices.iter().copied());
attestation_1_indices
.intersection(&attestation_2_indices)
.copied()
.collect()
}
}
impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> {
pub fn verify_and_observe(
&self,
op: T,
head_state: &BeaconState<E>,
spec: &ChainSpec,
) -> Result<ObservationOutcome<T>, T::Error> {
let mut observed_validator_indices = self.observed_validator_indices.lock();
let new_validator_indices = op.observed_validators();
// If all of the new validator indices have been previously observed, short-circuit
// the validation. This implements the uniqueness check part of the spec, which for attester
// slashings reads:
//
// At least one index in the intersection of the attesting indices of each attestation has
// not yet been seen in any prior attester_slashing.
if new_validator_indices
.iter()
.all(|index| observed_validator_indices.contains(index))
{
return Ok(ObservationOutcome::AlreadyKnown);
}
// Validate the op using operation-specific logic (`verify_attester_slashing`, etc).
let verified_op = op.validate(head_state, spec)?;
// Add the relevant indices to the set of known indices to prevent processing of duplicates
// in the future.
observed_validator_indices.extend(new_validator_indices);
Ok(ObservationOutcome::New(verified_op))
}
}
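The short-circuit above is just a set-membership check over validator indices. A self-contained sketch of that uniqueness rule, using plain `std` types in place of the real `Mutex<HashSet<u64>>` and `SigVerifiedOp` (helper names are illustrative):
use std::collections::HashSet;

/// Returns `true` if the operation introduces at least one unseen validator index
/// (and records all of its indices), mirroring the gossip uniqueness condition.
fn observe(seen: &mut HashSet<u64>, op_indices: &[u64]) -> bool {
    if op_indices.iter().all(|i| seen.contains(i)) {
        // Every index was already covered by a prior operation: duplicate.
        return false;
    }
    seen.extend(op_indices.iter().copied());
    true
}

fn main() {
    let mut seen = HashSet::new();
    assert!(observe(&mut seen, &[1, 2])); // new indices -> verify and process further
    assert!(!observe(&mut seen, &[2])); // fully covered -> AlreadyKnown
    assert!(observe(&mut seen, &[2, 3])); // index 3 is new -> verify and process further
}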

View File

@ -0,0 +1,25 @@
use crate::beacon_fork_choice_store::PersistedForkChoiceStore as ForkChoiceStore;
use fork_choice::PersistedForkChoice as ForkChoice;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use store::{DBColumn, Error, StoreItem};
#[derive(Encode, Decode)]
pub struct PersistedForkChoice {
pub fork_choice: ForkChoice,
pub fork_choice_store: ForkChoiceStore,
}
impl StoreItem for PersistedForkChoice {
fn db_column() -> DBColumn {
DBColumn::ForkChoice
}
fn as_store_bytes(&self) -> Vec<u8> {
self.as_ssz_bytes()
}
fn from_store_bytes(bytes: &[u8]) -> std::result::Result<Self, Error> {
Self::from_ssz_bytes(bytes).map_err(Into::into)
}
}

View File

@ -24,7 +24,7 @@ use tree_hash::TreeHash;
use types::{
AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec,
Hash256, Keypair, SecretKey, SelectionProof, Signature, SignedAggregateAndProof,
SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, Slot,
SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, Slot, SubnetId,
};
pub use types::test_utils::generate_deterministic_keypairs;
@ -125,8 +125,6 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
.null_event_handler()
.testing_slot_clock(HARNESS_SLOT_TIME)
.expect("should configure testing slot clock")
.reduced_tree_fork_choice()
.expect("should add fork choice to builder")
.build()
.expect("should build");
@ -167,8 +165,6 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
.null_event_handler()
.testing_slot_clock(HARNESS_SLOT_TIME)
.expect("should configure testing slot clock")
.reduced_tree_fork_choice()
.expect("should add fork choice to builder")
.build()
.expect("should build");
@ -207,8 +203,6 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
.null_event_handler()
.testing_slot_clock(Duration::from_secs(1))
.expect("should configure testing slot clock")
.reduced_tree_fork_choice()
.expect("should add fork choice to builder")
.build()
.expect("should build");
@ -250,6 +244,35 @@ where
block_strategy: BlockStrategy,
attestation_strategy: AttestationStrategy,
) -> Hash256 {
let mut i = 0;
self.extend_chain_while(
|_, _| {
i += 1;
i <= num_blocks
},
block_strategy,
attestation_strategy,
)
}
/// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
/// last-produced block (the head of the chain).
///
/// Chain will be extended while `predicate` returns `true`.
///
/// The `block_strategy` dictates where the new blocks will be placed.
///
/// The `attestation_strategy` dictates which validators will attest to the newly created
/// blocks.
pub fn extend_chain_while<F>(
&self,
mut predicate: F,
block_strategy: BlockStrategy,
attestation_strategy: AttestationStrategy,
) -> Hash256
where
F: FnMut(&SignedBeaconBlock<E>, &BeaconState<E>) -> bool,
{
let mut state = {
// Determine the slot for the first block (or skipped block).
let state_slot = match block_strategy {
@ -272,13 +295,17 @@ where
let mut head_block_root = None;
for _ in 0..num_blocks {
loop {
let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
if !predicate(&block, &new_state) {
break;
}
while self.chain.slot().expect("should have a slot") < slot {
self.advance_slot();
}
let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
let block_root = self
.chain
.process_block(block)
@ -296,6 +323,39 @@ where
head_block_root.expect("did not produce any blocks")
}
/// A simple method to produce a block at the current slot without applying it to the chain.
///
/// Always uses `BlockStrategy::OnCanonicalHead`.
pub fn get_block(&self) -> (SignedBeaconBlock<E>, BeaconState<E>) {
let state = self
.chain
.state_at_slot(
self.chain.slot().unwrap() - 1,
StateSkipConfig::WithStateRoots,
)
.unwrap();
let slot = self.chain.slot().unwrap();
self.build_block(state, slot, BlockStrategy::OnCanonicalHead)
}
/// A simple method to produce and process all attestations at the current slot. Always uses
/// `AttestationStrategy::AllValidators`.
pub fn generate_all_attestations(&self) {
let slot = self.chain.slot().unwrap();
let (state, block_root) = {
let head = self.chain.head().unwrap();
(head.beacon_state.clone(), head.beacon_block_root)
};
self.add_attestations_for_slot(
&AttestationStrategy::AllValidators,
&state,
block_root,
slot,
);
}
/// Returns current canonical head slot
pub fn get_chain_slot(&self) -> Slot {
self.chain.slot().unwrap()
@ -476,12 +536,16 @@ where
state: &BeaconState<E>,
head_block_root: Hash256,
attestation_slot: Slot,
) -> Vec<Vec<Attestation<E>>> {
) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
let spec = &self.spec;
let fork = &state.fork;
let attesting_validators = self.get_attesting_validators(attestation_strategy);
let committee_count = state
.get_committee_count_at_slot(state.slot)
.expect("should get committee count");
state
.get_beacon_committees_at_slot(state.slot)
.expect("should get committees")
@ -529,7 +593,14 @@ where
agg_sig
};
Some(attestation)
let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
&attestation.data,
committee_count,
&self.chain.spec,
)
.expect("should get subnet_id");
Some((attestation, subnet_id))
})
.collect()
})
@ -574,16 +645,16 @@ where
.into_iter()
.for_each(|committee_attestations| {
// Submit each unaggregated attestation to the chain.
for attestation in &committee_attestations {
for (attestation, subnet_id) in &committee_attestations {
self.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone())
.verify_unaggregated_attestation_for_gossip(attestation.clone(), *subnet_id)
.expect("should not error during attestation processing")
.add_to_pool(&self.chain)
.expect("should add attestation to naive pool");
}
// If there are any attestations in this committee, create an aggregate.
if let Some(attestation) = committee_attestations.first() {
if let Some((attestation, _)) = committee_attestations.first() {
let bc = state.get_beacon_committee(attestation.data.slot, attestation.data.index)
.expect("should get committee");
@ -617,7 +688,7 @@ where
.get_aggregated_attestation(&attestation.data)
.expect("should not error whilst finding aggregate")
.unwrap_or_else(|| {
committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, att| {
committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| {
agg.aggregate(att);
agg
})
@ -633,14 +704,16 @@ where
spec,
);
self.chain
let attn = self.chain
.verify_aggregated_attestation_for_gossip(signed_aggregate)
.expect("should not error during attestation processing")
.add_to_pool(&self.chain)
.expect("should add attestation to naive aggregation pool")
.add_to_fork_choice(&self.chain)
.expect("should not error during attestation processing");
self.chain.apply_attestation_to_fork_choice(&attn)
.expect("should add attestation to fork choice");
}
self.chain.add_to_block_inclusion_pool(attn)
.expect("should add attestation to op pool");
}
});
}

View File

@ -14,7 +14,7 @@ use tree_hash::TreeHash;
use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, EthSpec, Hash256,
Keypair, MainnetEthSpec, SecretKey, SelectionProof, Signature, SignedAggregateAndProof,
SignedBeaconBlock, Unsigned,
SignedBeaconBlock, SubnetId, Unsigned,
};
pub type E = MainnetEthSpec;
@ -49,7 +49,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
/// Also returns some info about who created it.
fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
) -> (Attestation<T::EthSpec>, usize, usize, SecretKey) {
) -> (Attestation<T::EthSpec>, usize, usize, SecretKey, SubnetId) {
let head = chain.head().expect("should get head");
let current_slot = chain.slot().expect("should get slot");
@ -78,11 +78,21 @@ fn get_valid_unaggregated_attestation<T: BeaconChainTypes>(
)
.expect("should sign attestation");
let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
&valid_attestation.data,
head.beacon_state
.get_committee_count_at_slot(current_slot)
.expect("should get committee count"),
&chain.spec,
)
.expect("should get subnet_id");
(
valid_attestation,
validator_index,
validator_committee_index,
validator_sk,
subnet_id,
)
}
@ -194,7 +204,7 @@ fn aggregated_gossip_verification() {
"the test requires a new epoch to avoid already-seen errors"
);
let (valid_attestation, _attester_index, _attester_committee_index, validator_sk) =
let (valid_attestation, _attester_index, _attester_committee_index, validator_sk, _subnet_id) =
get_valid_unaggregated_attestation(&harness.chain);
let (valid_aggregate, aggregator_index, aggregator_sk) =
get_valid_aggregated_attestation(&harness.chain, valid_attestation);
@ -222,7 +232,7 @@ fn aggregated_gossip_verification() {
/*
* The following two tests ensure:
*
* Spec v0.11.2
* Spec v0.12.1
*
* aggregate.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
* MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
@ -260,13 +270,14 @@ fn aggregated_gossip_verification() {
// slot and the propagation tolerance will allow an extra slot.
earliest_permissible_slot
}
if attestation_slot == early_slot && earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
if attestation_slot == early_slot
&& earliest_permissible_slot == current_slot - E::slots_per_epoch() - 1
);
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block being voted for (aggregate.data.beacon_block_root) passes validation.
*/
@ -285,10 +296,31 @@ fn aggregated_gossip_verification() {
if beacon_block_root == unknown_root
);
/*
* The following test ensures:
*
* Spec v0.12.1
*
* The attestation has participants.
*/
assert_invalid!(
"aggregate with no participants",
{
let mut a = valid_aggregate.clone();
let aggregation_bits = &mut a.message.aggregate.aggregation_bits;
aggregation_bits.difference_inplace(&aggregation_bits.clone());
assert!(aggregation_bits.is_zero());
a.message.aggregate.signature = AggregateSignature::new();
a
},
AttnError::EmptyAggregationBitfield
);
/*
* This test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The aggregator signature, signed_aggregate_and_proof.signature, is valid.
*/
@ -308,7 +340,7 @@ fn aggregated_gossip_verification() {
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The aggregate_and_proof.selection_proof is a valid signature of the aggregate.data.slot by
* the validator with index aggregate_and_proof.aggregator_index.
@ -355,7 +387,7 @@ fn aggregated_gossip_verification() {
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The signature of aggregate is valid.
*/
@ -389,11 +421,11 @@ fn aggregated_gossip_verification() {
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The aggregator's validator index is within the aggregate's committee -- i.e.
* aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data,
* aggregate.aggregation_bits).
* The aggregator's validator index is within the committee -- i.e.
* aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot,
* aggregate.data.index).
*/
let unknown_validator = VALIDATOR_COUNT as u64;
@ -418,7 +450,7 @@ fn aggregated_gossip_verification() {
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot --
* i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index,
@ -428,7 +460,7 @@ fn aggregated_gossip_verification() {
let (non_aggregator_index, non_aggregator_sk) =
get_non_aggregator(&harness.chain, &valid_aggregate.message.aggregate);
assert_invalid!(
"aggregate with from non-aggregator",
"aggregate from non-aggregator",
{
SignedAggregateAndProof::from_aggregate(
non_aggregator_index as u64,
@ -446,6 +478,8 @@ fn aggregated_gossip_verification() {
if index == non_aggregator_index as u64
);
// NOTE: from here on, the tests are stateful, and rely on the valid attestation having been
// seen. A refactor to give each test case its own state might be nice at some point
assert!(
harness
.chain
@ -455,20 +489,17 @@ fn aggregated_gossip_verification() {
);
/*
* The following tests ensures:
* The following test ensures:
*
* NOTE: this is a slight deviation from the spec, see:
* https://github.com/ethereum/eth2.0-specs/pull/1749
* Spec v0.12.1
*
* Spec v0.11.2
*
* The aggregate attestation defined by hash_tree_root(aggregate) has not already been seen
* (via aggregate gossip, within a block, or through the creation of an equivalent aggregate
* locally).
* The valid aggregate attestation defined by hash_tree_root(aggregate) has not already been
* seen (via aggregate gossip, within a block, or through the creation of an equivalent
* aggregate locally).
*/
assert_invalid!(
"aggregate with that has already been seen",
"aggregate that has already been seen",
valid_aggregate.clone(),
AttnError::AttestationAlreadyKnown(hash)
if hash == valid_aggregate.message.aggregate.tree_hash_root()
@ -477,7 +508,7 @@ fn aggregated_gossip_verification() {
/*
* The following test ensures:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The aggregate is the first valid aggregate received for the aggregator with index
* aggregate_and_proof.aggregator_index for the epoch aggregate.data.target.epoch.
@ -520,16 +551,21 @@ fn unaggregated_gossip_verification() {
"the test requires a new epoch to avoid already-seen errors"
);
let (valid_attestation, expected_validator_index, validator_committee_index, validator_sk) =
get_valid_unaggregated_attestation(&harness.chain);
let (
valid_attestation,
expected_validator_index,
validator_committee_index,
validator_sk,
subnet_id,
) = get_valid_unaggregated_attestation(&harness.chain);
macro_rules! assert_invalid {
($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
assert!(
matches!(
harness
.chain
.verify_unaggregated_attestation_for_gossip($attn_getter)
.verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter)
.err()
.expect(&format!(
"{} should error during verify_unaggregated_attestation_for_gossip",
@ -543,10 +579,33 @@ fn unaggregated_gossip_verification() {
};
}
/*
* The following test ensures:
*
* Spec v0.12.1
*
* The attestation is for the correct subnet (i.e. compute_subnet_for_attestation(state,
* attestation.data.slot, attestation.data.index) == subnet_id).
*/
let id: u64 = subnet_id.into();
let invalid_subnet_id = SubnetId::new(id + 1);
assert_invalid!(
"attestation from future slot",
{
valid_attestation.clone()
},
invalid_subnet_id,
AttnError::InvalidSubnetId {
received,
expected,
}
if received == invalid_subnet_id && expected == subnet_id
);
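For context, the `expected` subnet in this check comes from the spec's `compute_subnet_for_attestation`. A rough, self-contained sketch of that computation (the constants shown are the mainnet values; this is illustrative, not the real `SubnetId` implementation):
// Rough sketch of spec v0.12 compute_subnet_for_attestation.
fn compute_subnet(slot: u64, committee_index: u64, committees_per_slot: u64) -> u64 {
    const SLOTS_PER_EPOCH: u64 = 32;
    const ATTESTATION_SUBNET_COUNT: u64 = 64;

    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}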
/*
* The following two tests ensure:
*
* Spec v0.11.2
* Spec v0.12.1
*
* attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
* MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. attestation.data.slot +
@ -562,6 +621,7 @@ fn unaggregated_gossip_verification() {
a.data.slot = future_slot;
a
},
subnet_id,
AttnError::FutureSlot {
attestation_slot,
latest_permissible_slot,
@ -581,6 +641,7 @@ fn unaggregated_gossip_verification() {
a.data.slot = early_slot;
a
},
subnet_id,
AttnError::PastSlot {
attestation_slot,
// Subtract an additional slot since the harness will be exactly on the start of the
@ -593,7 +654,7 @@ fn unaggregated_gossip_verification() {
/*
* The following two tests ensure:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The attestation is unaggregated -- that is, it has exactly one participating validator
* (len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1).
@ -613,6 +674,7 @@ fn unaggregated_gossip_verification() {
);
a
},
subnet_id,
AttnError::NotExactlyOneAggregationBitSet(0)
);
@ -625,13 +687,14 @@ fn unaggregated_gossip_verification() {
.expect("should set second aggregation bit");
a
},
subnet_id,
AttnError::NotExactlyOneAggregationBitSet(2)
);
/*
* The following test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block being voted for (attestation.data.beacon_block_root) passes validation.
*/
@ -644,6 +707,7 @@ fn unaggregated_gossip_verification() {
a.data.beacon_block_root = unknown_root;
a
},
subnet_id,
AttnError::UnknownHeadBlock {
beacon_block_root,
}
@ -653,7 +717,7 @@ fn unaggregated_gossip_verification() {
/*
* The following test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The signature of attestation is valid.
*/
@ -669,13 +733,14 @@ fn unaggregated_gossip_verification() {
a
},
subnet_id,
AttnError::InvalidSignature
);
assert!(
harness
.chain
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone())
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id)
.is_ok(),
"valid attestation should be verified"
);
@ -683,7 +748,7 @@ fn unaggregated_gossip_verification() {
/*
* The following test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
*
* There has been no other valid attestation seen on an attestation subnet that has an
@ -693,6 +758,7 @@ fn unaggregated_gossip_verification() {
assert_invalid!(
"attestation that has already been seen",
valid_attestation.clone(),
subnet_id,
AttnError::PriorAttestationKnown {
validator_index,
epoch,
@ -701,244 +767,6 @@ fn unaggregated_gossip_verification() {
);
}
/// Tests the verification conditions for an unaggregated attestation on the gossip network.
#[test]
fn fork_choice_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
MainnetEthSpec::slots_per_epoch() as usize * 3 - 1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();
// We're going to produce the attestations at the first slot of the epoch.
let (valid_attestation, _validator_index, _validator_committee_index, _validator_sk) =
get_valid_unaggregated_attestation(&harness.chain);
// Extend the chain two more blocks, but without any attestations so we don't trigger the
// "already seen" caches.
//
// Because of this, the attestation we're dealing with was made one slot prior to the current
// slot. This allows us to test the `AttestsToFutureBlock` condition.
harness.extend_chain(
2,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(vec![]),
);
let current_slot = chain.slot().expect("should get slot");
let expected_current_epoch = chain.epoch().expect("should get epoch");
let attestation = harness
.chain
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone())
.expect("precondition: should gossip verify attestation");
macro_rules! assert_invalid {
($desc: tt, $attn_getter: expr, $($error: pat) |+ $( if $guard: expr )?) => {
assert!(
matches!(
harness
.chain
.apply_attestation_to_fork_choice(&$attn_getter)
.err()
.expect(&format!(
"{} should error during apply_attestation_to_fork_choice",
$desc
)),
$( $error ) |+ $( if $guard )?
),
"case: {}",
$desc,
);
};
}
assert_invalid!(
"attestation without any aggregation bits set",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().attesting_indices = vec![].into();
a
},
AttnError::EmptyAggregationBitfield
);
/*
* The following two tests ensure that:
*
* Spec v0.11.2
*
* assert target.epoch in [expected_current_epoch, previous_epoch]
*/
let future_epoch = expected_current_epoch + 1;
assert_invalid!(
"attestation from future epoch",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().data.target.epoch = future_epoch;
a
},
AttnError::FutureEpoch {
attestation_epoch,
current_epoch,
}
if attestation_epoch == future_epoch && current_epoch == expected_current_epoch
);
assert!(
expected_current_epoch > 1,
"precondition: must be able to have a past epoch"
);
let past_epoch = expected_current_epoch - 2;
assert_invalid!(
"attestation from past epoch",
{
let mut a = attestation.clone();
a.__indexed_attestation_mut().data.target.epoch = past_epoch;
a
},
AttnError::PastEpoch {
attestation_epoch,
current_epoch,
}
if attestation_epoch == past_epoch && current_epoch == expected_current_epoch
);
/*
* This test ensures that:
*
* Spec v0.11.2
*
* assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
*/
assert_invalid!(
"attestation with bad target epoch",
{
let mut a = attestation.clone();
let indexed = a.__indexed_attestation_mut();
indexed.data.target.epoch = indexed.data.slot.epoch(E::slots_per_epoch()) - 1;
a
},
AttnError::BadTargetEpoch
);
/*
* This test ensures that:
*
* Spec v0.11.2
*
* Attestations' target must be for a known block. If the target block is unknown, delay consideration
* until the block is found
*
* assert target.root in store.blocks
*/
let unknown_root = Hash256::from_low_u64_le(42);
assert_invalid!(
"attestation with unknown target root",
{
let mut a = attestation.clone();
let indexed = a.__indexed_attestation_mut();
indexed.data.target.root = unknown_root;
a
},
AttnError::UnknownTargetRoot(hash) if hash == unknown_root
);
// NOTE: we're not testing an assert from the spec:
//
// `assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)`
//
// I think this check is redundant and I've raised an issue here:
//
// https://github.com/ethereum/eth2.0-specs/pull/1755
/*
* This test asserts that:
*
* Spec v0.11.2
*
* # Attestations must be for a known block. If block is unknown, delay consideration until the
* block is found
*
* assert attestation.data.beacon_block_root in store.blocks
*/
assert_invalid!(
"attestation with unknown beacon block root",
{
let mut a = attestation.clone();
let indexed = a.__indexed_attestation_mut();
indexed.data.beacon_block_root = unknown_root;
a
},
AttnError::UnknownHeadBlock {
beacon_block_root
}
if beacon_block_root == unknown_root
);
let future_block = harness
.chain
.block_at_slot(current_slot)
.expect("should not error getting block")
.expect("should find block at current slot");
assert_invalid!(
"attestation to future block",
{
let mut a = attestation.clone();
let indexed = a.__indexed_attestation_mut();
assert!(
future_block.slot() > indexed.data.slot,
"precondition: the attestation must attest to the future"
);
indexed.data.beacon_block_root = future_block.canonical_root();
a
},
AttnError::AttestsToFutureBlock {
block: current_slot,
attestation: slot,
}
if slot == current_slot - 1
);
// Note: we're not checking the "attestations can only affect the fork choice of subsequent
// slots" part of the spec, we do this upstream.
assert!(
harness
.chain
.apply_attestation_to_fork_choice(&attestation.clone())
.is_ok(),
"should verify valid attestation"
);
// There's nothing stopping fork choice from accepting the same attestation twice.
assert!(
harness
.chain
.apply_attestation_to_fork_choice(&attestation)
.is_ok(),
"should verify valid attestation a second time"
);
}
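// A minimal, self-contained sketch of the spec condition exercised above,
// `assert target.epoch in [expected_current_epoch, previous_epoch]`, using plain integers
// in place of Lighthouse's `Epoch` type. The variant names below only loosely mirror the
// `FutureEpoch`/`PastEpoch` errors and are illustrative, not the crate's definitions.
#[derive(Debug, PartialEq)]
enum EpochCheck {
    Ok,
    FutureEpoch { attestation_epoch: u64, current_epoch: u64 },
    PastEpoch { attestation_epoch: u64, current_epoch: u64 },
}

fn check_target_epoch(attestation_epoch: u64, current_epoch: u64) -> EpochCheck {
    let previous_epoch = current_epoch.saturating_sub(1);
    if attestation_epoch > current_epoch {
        EpochCheck::FutureEpoch { attestation_epoch, current_epoch }
    } else if attestation_epoch < previous_epoch {
        EpochCheck::PastEpoch { attestation_epoch, current_epoch }
    } else {
        EpochCheck::Ok
    }
}

#[test]
fn target_epoch_window() {
    assert_eq!(check_target_epoch(5, 5), EpochCheck::Ok); // current epoch
    assert_eq!(check_target_epoch(4, 5), EpochCheck::Ok); // previous epoch
    assert!(matches!(check_target_epoch(6, 5), EpochCheck::FutureEpoch { .. }));
    assert!(matches!(check_target_epoch(3, 5), EpochCheck::PastEpoch { .. }));
}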
/// Ensures that an attestation that skips epochs can still be processed.
///
/// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache.
@ -951,7 +779,7 @@ fn attestation_that_skips_epochs() {
harness.extend_chain(
MainnetEthSpec::slots_per_epoch() as usize * 3 + 1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
AttestationStrategy::SomeValidators(vec![]),
);
let current_slot = chain.slot().expect("should get slot");
@ -972,7 +800,7 @@ fn attestation_that_skips_epochs() {
per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
}
let attestation = harness
let (attestation, subnet_id) = harness
.get_unaggregated_attestations(
&AttestationStrategy::AllValidators,
&state,
@ -1000,11 +828,8 @@ fn attestation_that_skips_epochs() {
"the attestation must skip more than two epochs"
);
assert!(
harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation)
.is_ok(),
"should gossip verify attestation that skips slots"
);
harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
.expect("should gossip verify attestation that skips slots");
}
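// A minimal sketch of the "skipped epochs" arithmetic the precondition above relies on:
// given the slot the head state was built at and the later slot being attested to, count
// how many whole epochs lie between them. Plain integers stand in for `Slot`/`Epoch`; the
// constant mirrors mainnet's slots-per-epoch but is hard-coded here for illustration.
const SLOTS_PER_EPOCH: u64 = 32;

fn epochs_skipped(state_slot: u64, attestation_slot: u64) -> u64 {
    (attestation_slot / SLOTS_PER_EPOCH).saturating_sub(state_slot / SLOTS_PER_EPOCH)
}

#[test]
fn skipping_epochs() {
    // An attestation made three epochs after its head state skips more than two epochs,
    // which is the precondition asserted in `attestation_that_skips_epochs`.
    assert!(epochs_skipped(SLOTS_PER_EPOCH, 4 * SLOTS_PER_EPOCH + 1) > 2);
}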

View File

@ -558,7 +558,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) --
* i.e. validate that signed_beacon_block.message.slot <= current_slot (a client MAY queue
@ -583,7 +583,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is from a slot greater than the latest finalized slot -- i.e. validate that
* signed_beacon_block.message.slot >
@ -616,7 +616,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The proposer signature, signed_beacon_block.signature, is valid with respect to the
* proposer_index pubkey.
@ -635,7 +635,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is proposed by the expected proposer_index for the block's slot in the context of
* the current shuffling (defined by parent_root/slot). If the proposer_index cannot
@ -689,7 +689,7 @@ fn block_gossip_verification() {
/*
* This test ensures that:
*
* Spec v0.11.2
* Spec v0.12.1
*
* The block is the first block with valid signature received for the proposer for the slot,
* signed_beacon_block.message.slot.
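// A self-contained sketch of the first condition listed above: a block is acceptable on
// gossip only if its slot does not start more than MAXIMUM_GOSSIP_CLOCK_DISPARITY ahead of
// the local clock. Times are plain milliseconds here and both constants are hard-coded for
// illustration (500ms disparity and 12s slots match the mainnet config at this spec
// version, but take them as assumptions of the sketch).
const MAXIMUM_GOSSIP_CLOCK_DISPARITY_MILLIS: u64 = 500;
const MILLIS_PER_SLOT: u64 = 12_000;

fn block_is_from_future(block_slot: u64, now_millis: u64, genesis_millis: u64) -> bool {
    let block_start = genesis_millis + block_slot * MILLIS_PER_SLOT;
    block_start > now_millis + MAXIMUM_GOSSIP_CLOCK_DISPARITY_MILLIS
}

#[test]
fn tolerates_small_clock_skew() {
    let genesis = 0;
    // 400ms before slot 10 starts: within the allowed disparity, so not "from the future".
    assert!(!block_is_from_future(10, 10 * MILLIS_PER_SLOT - 400, genesis));
    // A full second early exceeds the allowance and the block must be queued or dropped.
    assert!(block_is_from_future(10, 10 * MILLIS_PER_SLOT - 1_000, genesis));
}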

View File

@ -0,0 +1,271 @@
//! Tests for gossip verification of voluntary exits, proposer slashings and attester slashings.
#![cfg(not(debug_assertions))]
#[macro_use]
extern crate lazy_static;
use beacon_chain::observed_operations::ObservationOutcome;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::test_utils::{
AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder,
TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder,
};
use types::*;
pub const VALIDATOR_COUNT: usize = 24;
lazy_static! {
/// A cached set of keys.
static ref KEYPAIRS: Vec<Keypair> =
types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let spec = E::default_spec();
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
Arc::new(
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
.expect("disk store should initialize"),
)
}
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store,
KEYPAIRS[0..validator_count].to_vec(),
);
harness.advance_slot();
harness
}
#[test]
fn voluntary_exit() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
harness.extend_chain(
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
let head_info = harness.chain.head_info().unwrap();
let make_exit = |validator_index: usize, exit_epoch: u64| {
TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build(
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;
let exit1 = make_exit(validator_index1, spec.shard_committee_period);
// First verification should show it to be fresh.
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit1.clone())
.unwrap(),
ObservationOutcome::New(_)
));
// Second should not.
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit1.clone()),
Ok(ObservationOutcome::AlreadyKnown)
));
// A different exit for the same validator should also be detected as a duplicate.
let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1);
assert!(matches!(
harness.chain.verify_voluntary_exit_for_gossip(exit2),
Ok(ObservationOutcome::AlreadyKnown)
));
// Exit for a different validator should be fine.
let exit3 = make_exit(validator_index2, spec.shard_committee_period);
assert!(matches!(
harness
.chain
.verify_voluntary_exit_for_gossip(exit3)
.unwrap(),
ObservationOutcome::New(_)
));
}
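// A minimal sketch of the de-duplication behaviour exercised above: the first exit seen for
// a validator is "new" and any later exit for the same validator index, even a different
// message, is "already known". A `HashSet` of validator indices stands in for Lighthouse's
// observed-operations pool; the names here are illustrative only.
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum Outcome {
    New,
    AlreadyKnown,
}

struct ObservedExits(HashSet<u64>);

impl ObservedExits {
    fn observe(&mut self, validator_index: u64) -> Outcome {
        if self.0.insert(validator_index) {
            Outcome::New
        } else {
            Outcome::AlreadyKnown
        }
    }
}

#[test]
fn exit_dedup() {
    let mut observed = ObservedExits(HashSet::new());
    assert_eq!(observed.observe(23), Outcome::New);
    assert_eq!(observed.observe(23), Outcome::AlreadyKnown); // same validator, any exit
    assert_eq!(observed.observe(22), Outcome::New); // a different validator is fine
}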
#[test]
fn proposer_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let head_info = harness.chain.head_info().unwrap();
let validator_index1 = VALIDATOR_COUNT - 1;
let validator_index2 = VALIDATOR_COUNT - 2;
let make_slashing = |validator_index: usize| {
TestingProposerSlashingBuilder::double_vote::<E>(
ProposerSlashingTestTask::Valid,
validator_index as u64,
&KEYPAIRS[validator_index].sk,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
let slashing1 = make_slashing(validator_index1);
// First slashing for this proposer should be allowed.
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::New(_)
));
// Duplicate slashing should be detected.
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));
// Different slashing for the same index should be rejected
let slashing2 = ProposerSlashing {
signed_header_1: slashing1.signed_header_2,
signed_header_2: slashing1.signed_header_1,
};
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing2)
.unwrap(),
ObservationOutcome::AlreadyKnown
));
// Proposer slashing for a different index should be accepted
let slashing3 = make_slashing(validator_index2);
assert!(matches!(
harness
.chain
.verify_proposer_slashing_for_gossip(slashing3)
.unwrap(),
ObservationOutcome::New(_)
));
}
#[test]
fn attester_slashing() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let head_info = harness.chain.head_info().unwrap();
// First third of the validators
let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::<Vec<_>>();
// First half of the validators
let first_half = (0..VALIDATOR_COUNT as u64 / 2).collect::<Vec<_>>();
// Last third of the validators
let last_third = (2 * VALIDATOR_COUNT as u64 / 3..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
// Last half of the validators
let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::<Vec<_>>();
let signer = |idx: u64, message: &[u8]| Signature::new(message, &KEYPAIRS[idx as usize].sk);
let make_slashing = |validators| {
TestingAttesterSlashingBuilder::double_vote::<_, E>(
AttesterSlashingTestTask::Valid,
validators,
signer,
&head_info.fork,
head_info.genesis_validators_root,
spec,
)
};
// Slashing for first third of validators should be accepted.
let slashing1 = make_slashing(&first_third);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::New(_)
));
// Overlapping slashing for first half of validators should also be accepted.
let slashing2 = make_slashing(&first_half);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing2.clone())
.unwrap(),
ObservationOutcome::New(_)
));
// Repeating slashing1 or slashing2 should be rejected
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing1.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing2.clone())
.unwrap(),
ObservationOutcome::AlreadyKnown
));
// Slashing for last half of validators should be accepted (distinct from all existing)
let slashing3 = make_slashing(&second_half);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing3)
.unwrap(),
ObservationOutcome::New(_)
));
// Slashing for last third (contained in last half) should be rejected.
let slashing4 = make_slashing(&last_third);
assert!(matches!(
harness
.chain
.verify_attester_slashing_for_gossip(slashing4)
.unwrap(),
ObservationOutcome::AlreadyKnown
));
}
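// A minimal sketch of the rule the assertions above rely on: an attester slashing is only
// "new" if it covers at least one validator index not already covered by previously
// observed slashings; one whose indices are all already covered is treated as already
// known. A `HashSet` stands in for the observed-operations pool and the API is illustrative.
struct ObservedAttesterSlashings(std::collections::HashSet<u64>);

impl ObservedAttesterSlashings {
    /// Returns `true` if the slashing adds at least one previously unseen index.
    fn observe_slashing(&mut self, indices: &[u64]) -> bool {
        let is_new = indices.iter().any(|i| !self.0.contains(i));
        self.0.extend(indices.iter().copied());
        is_new
    }
}

#[test]
fn subset_slashings_are_already_known() {
    let mut observed = ObservedAttesterSlashings(Default::default());
    assert!(observed.observe_slashing(&[0, 1, 2])); // first third: new
    assert!(observed.observe_slashing(&[0, 1, 2, 3])); // overlapping but larger: still new
    assert!(!observed.observe_slashing(&[1, 2])); // strict subset: already known
}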

View File

@ -154,7 +154,7 @@ fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b
"genesis_block_root should be equal"
);
assert!(
a.fork_choice == b.fork_choice,
*a.fork_choice.read() == *b.fork_choice.read(),
"fork_choice should be equal"
);
}

View File

@ -293,7 +293,7 @@ fn epoch_boundary_state_attestation_processing() {
let mut checked_pre_fin = false;
for attestation in late_attestations.into_iter().flatten() {
for (attestation, subnet_id) in late_attestations.into_iter().flatten() {
// load_epoch_boundary_state is idempotent!
let block_root = attestation.data.beacon_block_root;
let block = store.get_block(&block_root).unwrap().expect("block exists");
@ -317,7 +317,7 @@ fn epoch_boundary_state_attestation_processing() {
let res = harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone());
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
let current_slot = harness.chain.slot().expect("should get slot");
let expected_attestation_slot = attestation.data.slot;

View File

@ -354,8 +354,6 @@ fn roundtrip_operation_pool() {
.persist_op_pool()
.expect("should persist op pool");
let head_state = harness.chain.head().expect("should get head").beacon_state;
let key = Hash256::from_slice(&OP_POOL_DB_KEY);
let restored_op_pool = harness
.chain
@ -363,7 +361,7 @@ fn roundtrip_operation_pool() {
.get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
.expect("should read db")
.expect("should find op pool")
.into_operation_pool(&head_state, &harness.spec);
.into_operation_pool();
assert_eq!(harness.chain.op_pool, restored_op_pool);
}
@ -381,7 +379,13 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
);
let state = &harness.chain.head().expect("should get head").beacon_state;
let fork_choice = &harness.chain.fork_choice;
let mut fork_choice = harness.chain.fork_choice.write();
// Move forward a slot so all queued attestations can be processed.
harness.advance_slot();
fork_choice
.update_time(harness.chain.slot().unwrap())
.unwrap();
let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
.into_iter()
@ -403,7 +407,7 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
assert_eq!(
latest_message.unwrap().1,
slot.epoch(MinimalEthSpec::slots_per_epoch()),
"Latest message slot for {} should be equal to slot {}.",
"Latest message epoch for {} should be equal to epoch {}.",
validator,
slot
)
@ -453,10 +457,10 @@ fn attestations_with_increasing_slots() {
harness.advance_slot();
}
for attestation in attestations.into_iter().flatten() {
for (attestation, subnet_id) in attestations.into_iter().flatten() {
let res = harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone());
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
let current_slot = harness.chain.slot().expect("should get slot");
let expected_attestation_slot = attestation.data.slot;
@ -491,7 +495,13 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() {
);
let state = &harness.chain.head().expect("should get head").beacon_state;
let fork_choice = &harness.chain.fork_choice;
let mut fork_choice = harness.chain.fork_choice.write();
// Move forward a slot so all queued attestations can be processed.
harness.advance_slot();
fork_choice
.update_time(harness.chain.slot().unwrap())
.unwrap();
let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
let slots: Vec<Slot> = validators

View File

@ -204,7 +204,11 @@ where
"deposit_contract" => &config.eth1.deposit_contract_address
);
let genesis_service = Eth1GenesisService::new(config.eth1, context.log().clone());
let genesis_service = Eth1GenesisService::new(
config.eth1,
context.log().clone(),
context.eth2_config().spec.clone(),
);
let genesis_state = genesis_service
.wait_for_genesis_state(
@ -414,8 +418,6 @@ where
.clone()
.ok_or_else(|| "beacon_chain requires a slot clock")?,
)
.reduced_tree_fork_choice()
.map_err(|e| format!("Failed to init fork choice: {}", e))?
.build()
.map_err(|e| format!("Failed to build beacon chain: {}", e))?;
@ -627,6 +629,10 @@ where
let beacon_chain_builder = self
.beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
let spec = self
.chain_spec
.clone()
.ok_or_else(|| "caching_eth1_backend requires a chain spec".to_string())?;
let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
eth1_service_from_genesis.update_config(config)?;
@ -650,10 +656,17 @@ where
&persisted,
config.clone(),
&context.log().clone(),
spec.clone(),
)
.map(|chain| chain.into_backend())
})
.unwrap_or_else(|| Ok(CachingEth1Backend::new(config, context.log().clone())))?
.unwrap_or_else(|| {
Ok(CachingEth1Backend::new(
config,
context.log().clone(),
spec.clone(),
))
})?
};
self.eth1_service = None;

View File

@ -243,36 +243,38 @@ impl DepositCache {
}
}
/// Gets the deposit count at block height = block_number.
/// Returns the number of deposits with valid signatures that have been observed up to and
/// including the block at `block_number`.
///
/// Fetches the `DepositLog` that was emitted at or just before `block_number`
/// and returns the deposit count as `index + 1`.
///
/// Returns `None` if block number queried is 0 or less than deposit_contract_deployed block.
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option<u64> {
// Contract cannot be deployed in 0'th block
if block_number == 0 {
return None;
}
if block_number < self.deposit_contract_deploy_block {
return None;
}
// Return 0 if block_num queried is before first deposit
if let Some(first_deposit) = self.logs.first() {
if first_deposit.block_number > block_number {
return Some(0);
}
}
let index = self
.logs
.binary_search_by(|deposit| deposit.block_number.cmp(&block_number));
match index {
Ok(index) => self.logs.get(index).map(|x| x.index + 1),
Err(next) => Some(
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_valid_signature_count(&self, block_number: u64) -> Option<usize> {
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
None
} else {
Some(
self.logs
.get(next.saturating_sub(1))
.map_or(0, |x| x.index + 1),
),
.iter()
.take_while(|deposit| deposit.block_number <= block_number)
.filter(|deposit| deposit.signature_is_valid)
.count(),
)
}
}
/// Returns the number of deposits that have been observed up to and
/// including the block at `block_number`.
///
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option<u64> {
if block_number == 0 || block_number < self.deposit_contract_deploy_block {
None
} else {
Some(
self.logs
.iter()
.take_while(|deposit| deposit.block_number <= block_number)
.count() as u64,
)
}
}
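// A self-contained sketch of the counting logic above: because the cached logs are sorted
// by block number, counting deposits up to a block is a `take_while`, and counting only
// those with valid signatures adds a `filter`. `DepositEntry` is a stand-in for the crate's
// `DepositLog`, kept deliberately small for illustration.
struct DepositEntry {
    block_number: u64,
    signature_is_valid: bool,
}

fn deposit_count(logs: &[DepositEntry], block_number: u64) -> u64 {
    logs.iter()
        .take_while(|log| log.block_number <= block_number)
        .count() as u64
}

fn valid_signature_count(logs: &[DepositEntry], block_number: u64) -> usize {
    logs.iter()
        .take_while(|log| log.block_number <= block_number)
        .filter(|log| log.signature_is_valid)
        .count()
}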
@ -291,15 +293,18 @@ pub mod tests {
use super::*;
use crate::deposit_log::tests::EXAMPLE_LOG;
use crate::http::Log;
use types::{EthSpec, MainnetEthSpec};
pub const TREE_DEPTH: usize = 32;
fn example_log() -> DepositLog {
let spec = MainnetEthSpec::default_spec();
let log = Log {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
DepositLog::from_log(&log).expect("should decode log")
DepositLog::from_log(&log, &spec).expect("should decode log")
}
#[test]

View File

@ -1,7 +1,10 @@
use super::http::Log;
use ssz::Decode;
use ssz_derive::{Decode, Encode};
use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes};
use state_processing::per_block_processing::signature_sets::{
deposit_pubkey_signature_message, deposit_signature_set,
};
use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};
/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
/// event bytes are formatted according to the Ethereum ABI.
@ -24,11 +27,13 @@ pub struct DepositLog {
pub block_number: u64,
/// The index included with the deposit log.
pub index: u64,
/// True if the signature is valid.
pub signature_is_valid: bool,
}
impl DepositLog {
/// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
pub fn from_log(log: &Log) -> Result<Self, String> {
pub fn from_log(log: &Log, spec: &ChainSpec) -> Result<Self, String> {
let bytes = &log.data;
let pubkey = bytes
@ -58,10 +63,15 @@ impl DepositLog {
.map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
};
let deposit_signature_message = deposit_pubkey_signature_message(&deposit_data, spec)
.ok_or_else(|| "Unable to prepare deposit signature verification".to_string())?;
let signature_is_valid = deposit_signature_set(&deposit_signature_message).is_valid();
Ok(DepositLog {
deposit_data,
block_number: log.block_number,
index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?,
signature_is_valid,
})
}
}
@ -70,6 +80,7 @@ impl DepositLog {
pub mod tests {
use super::*;
use crate::http::Log;
use types::{EthSpec, MainnetEthSpec};
/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
pub const EXAMPLE_LOG: &[u8] = &[
@ -103,6 +114,6 @@ pub mod tests {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
DepositLog::from_log(&log).expect("should decode log");
DepositLog::from_log(&log, &MainnetEthSpec::default_spec()).expect("should decode log");
}
}

View File

@ -6,6 +6,7 @@ use crate::{
use parking_lot::RwLock;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::ChainSpec;
#[derive(Default)]
pub struct DepositUpdater {
@ -28,6 +29,7 @@ pub struct Inner {
pub block_cache: RwLock<BlockCache>,
pub deposit_cache: RwLock<DepositUpdater>,
pub config: RwLock<Config>,
pub spec: ChainSpec,
}
impl Inner {
@ -47,10 +49,15 @@ impl Inner {
}
/// Recover `Inner` given byte representation of eth1 deposit and block caches.
pub fn from_bytes(bytes: &[u8], config: Config) -> Result<Self, String> {
pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
.map_err(|e| format!("Ssz decoding error: {:?}", e))?;
Ok(ssz_cache.to_inner(config)?)
Ok(ssz_cache.to_inner(config, spec)?)
}
/// Returns a reference to the specification.
pub fn spec(&self) -> &ChainSpec {
&self.spec
}
}
@ -72,7 +79,7 @@ impl SszEth1Cache {
}
}
pub fn to_inner(&self, config: Config) -> Result<Inner, String> {
pub fn to_inner(&self, config: Config, spec: ChainSpec) -> Result<Inner, String> {
Ok(Inner {
block_cache: RwLock::new(self.block_cache.clone()),
deposit_cache: RwLock::new(DepositUpdater {
@ -80,6 +87,7 @@ impl SszEth1Cache {
last_processed_block: self.last_processed_block,
}),
config: RwLock::new(config),
spec,
})
}
}

View File

@ -14,6 +14,7 @@ use std::ops::{Range, RangeInclusive};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{interval_at, Duration, Instant};
use types::ChainSpec;
const STANDARD_TIMEOUT_MILLIS: u64 = 15_000;
@ -130,14 +131,15 @@ pub struct Service {
impl Service {
/// Creates a new service. Does not attempt to connect to the eth1 node.
pub fn new(config: Config, log: Logger) -> Self {
pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self {
Self {
inner: Arc::new(Inner {
block_cache: <_>::default(),
deposit_cache: RwLock::new(DepositUpdater::new(
config.deposit_contract_deploy_block,
)),
config: RwLock::new(config),
..Inner::default()
spec,
}),
log,
}
@ -149,8 +151,13 @@ impl Service {
}
/// Recover the deposit and block caches from encoded bytes.
pub fn from_bytes(bytes: &[u8], config: Config, log: Logger) -> Result<Self, String> {
let inner = Inner::from_bytes(bytes, config)?;
pub fn from_bytes(
bytes: &[u8],
config: Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let inner = Inner::from_bytes(bytes, config, spec)?;
Ok(Self {
inner: Arc::new(inner),
log,
@ -194,6 +201,14 @@ impl Service {
self.inner.block_cache.read().lowest_block_number()
}
/// Returns the highest block that is present in both the deposit and block caches.
pub fn highest_safe_block(&self) -> Option<u64> {
let block_cache = self.blocks().read().highest_block_number()?;
let deposit_cache = self.deposits().read().last_processed_block?;
Some(std::cmp::min(block_cache, deposit_cache))
}
/// Returns the number of currently cached blocks.
pub fn block_cache_len(&self) -> usize {
self.blocks().read().len()
@ -204,6 +219,25 @@ impl Service {
self.deposits().read().cache.len()
}
/// Returns the number of deposits with valid signatures that have been observed.
pub fn get_valid_signature_count(&self) -> Option<usize> {
self.deposits()
.read()
.cache
.get_valid_signature_count(self.highest_safe_block()?)
}
/// Returns the number of deposits with valid signatures that have been observed up to and
/// including the block at `block_number`.
///
/// Returns `None` if the `block_number` is zero or prior to contract deployment.
pub fn get_valid_signature_count_at_block(&self, block_number: u64) -> Option<usize> {
self.deposits()
.read()
.cache
.get_valid_signature_count(block_number)
}
/// Read the service's configuration.
pub fn config(&self) -> RwLockReadGuard<Config> {
self.inner.config.read()
@ -402,9 +436,11 @@ impl Service {
log_chunk
.into_iter()
.map(|raw_log| {
DepositLog::from_log(&raw_log).map_err(|error| Error::FailedToParseDepositLog {
block_range: block_range.clone(),
error,
DepositLog::from_log(&raw_log, service.inner.spec()).map_err(|error| {
Error::FailedToParseDepositLog {
block_range: block_range.clone(),
error,
}
})
})
// Return early if any of the logs cannot be parsed.

View File

@ -99,6 +99,7 @@ async fn get_block_number(web3: &Web3<Http>) -> u64 {
mod eth1_cache {
use super::*;
use types::{EthSpec, MainnetEthSpec};
#[tokio::test]
async fn simple_scenario() {
@ -122,6 +123,7 @@ mod eth1_cache {
..Config::default()
},
log.clone(),
MainnetEthSpec::default_spec(),
);
// Create some blocks and then consume them, performing the test `rounds` times.
@ -194,6 +196,7 @@ mod eth1_cache {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
let blocks = cache_len * 2;
@ -240,6 +243,7 @@ mod eth1_cache {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
for _ in 0..4u8 {
@ -282,6 +286,7 @@ mod eth1_cache {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
for _ in 0..n {
@ -328,6 +333,7 @@ mod deposit_tree {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
for round in 0..3 {
@ -401,6 +407,7 @@ mod deposit_tree {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
@ -425,6 +432,8 @@ mod deposit_tree {
async fn cache_consistency() {
let n = 8;
let spec = &MainnetEthSpec::default_spec();
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
let eth1 = GanacheEth1Instance::new()
@ -462,7 +471,7 @@ mod deposit_tree {
let logs: Vec<_> = blocking_deposit_logs(&eth1, 0..block_number)
.await
.iter()
.map(|raw| DepositLog::from_log(raw).expect("should parse deposit log"))
.map(|raw| DepositLog::from_log(raw, spec).expect("should parse deposit log"))
.inspect(|log| {
tree.insert_log(log.clone())
.expect("should add consecutive logs")
@ -639,6 +648,7 @@ mod fast {
..Config::default()
},
log,
MainnetEthSpec::default_spec(),
);
let n = 10;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
@ -708,7 +718,7 @@ mod persist {
block_cache_truncation: None,
..Config::default()
};
let service = Service::new(config.clone(), log.clone());
let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec());
let n = 10;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
@ -745,7 +755,8 @@ mod persist {
// Drop service and recover from bytes
drop(service);
let recovered_service = Service::from_bytes(&eth1_bytes, config, log).unwrap();
let recovered_service =
Service::from_bytes(&eth1_bytes, config, log, MainnetEthSpec::default_spec()).unwrap();
assert_eq!(
recovered_service.block_cache_len(),
block_count,

View File

@ -40,7 +40,7 @@ libp2p-tcp = { version = "0.19.1", default-features = false, features = ["tokio"
[dependencies.libp2p]
version = "0.19.1"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "secio", "gossipsub", "dns"]
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns"]
[dev-dependencies]

View File

@ -1,11 +1,15 @@
use crate::rpc::methods::*;
use crate::rpc::{
codec::base::OutboundCodec,
protocol::{Encoding, Protocol, ProtocolId, RPCError, Version},
protocol::{
Encoding, Protocol, ProtocolId, RPCError, Version, BLOCKS_BY_ROOT_REQUEST_MAX,
BLOCKS_BY_ROOT_REQUEST_MIN, SIGNED_BEACON_BLOCK_MAX, SIGNED_BEACON_BLOCK_MIN,
},
};
use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse};
use libp2p::bytes::{BufMut, Bytes, BytesMut};
use ssz::{Decode, Encode};
use ssz_types::VariableList;
use std::marker::PhantomData;
use tokio_util::codec::{Decoder, Encoder};
use types::{EthSpec, SignedBeaconBlock};
@ -52,9 +56,9 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZInboundCodec<TSpec>
RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
RPCResponse::MetaData(res) => res.as_ssz_bytes(),
},
RPCCodedResponse::InvalidRequest(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::ServerError(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::Unknown(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::InvalidRequest(err) => err.as_ssz_bytes(),
RPCCodedResponse::ServerError(err) => err.as_ssz_bytes(),
RPCCodedResponse::Unknown(err) => err.as_ssz_bytes(),
RPCCodedResponse::StreamTermination(_) => {
unreachable!("Code error - attempting to encode a stream termination")
}
@ -83,29 +87,61 @@ impl<TSpec: EthSpec> Decoder for SSZInboundCodec<TSpec> {
match self.inner.decode(src).map_err(RPCError::from) {
Ok(Some(packet)) => match self.protocol.message_name {
Protocol::Status => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
&packet,
)?))),
Version::V1 => {
if packet.len() == <StatusMessage as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
&packet,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Goodbye => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
&packet,
)?))),
Version::V1 => {
if packet.len() == <GoodbyeReason as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
&packet,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRange => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::BlocksByRange(
BlocksByRangeRequest::from_ssz_bytes(&packet)?,
))),
Version::V1 => {
if packet.len() == <BlocksByRangeRequest as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::BlocksByRange(
BlocksByRangeRequest::from_ssz_bytes(&packet)?,
)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRoot => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: Vec::from_ssz_bytes(&packet)?,
}))),
Version::V1 => {
if packet.len() >= *BLOCKS_BY_ROOT_REQUEST_MIN
&& packet.len() <= *BLOCKS_BY_ROOT_REQUEST_MAX
{
Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(&packet)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Ping => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Ping(Ping {
data: u64::from_ssz_bytes(&packet)?,
}))),
Version::V1 => {
if packet.len() == <Ping as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Ping(Ping {
data: u64::from_ssz_bytes(&packet)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::MetaData => match self.protocol.version {
Version::V1 => {
@ -207,30 +243,64 @@ impl<TSpec: EthSpec> Decoder for SSZOutboundCodec<TSpec> {
match self.protocol.message_name {
Protocol::Status => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::Status(
StatusMessage::from_ssz_bytes(&raw_bytes)?,
))),
Version::V1 => {
if raw_bytes.len() == <StatusMessage as Encode>::ssz_fixed_len() {
Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
&raw_bytes,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Goodbye => Err(RPCError::InvalidData),
Protocol::BlocksByRange => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new(
SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
)))),
Version::V1 => {
if raw_bytes.len() >= *SIGNED_BEACON_BLOCK_MIN
&& raw_bytes.len() <= *SIGNED_BEACON_BLOCK_MAX
{
Ok(Some(RPCResponse::BlocksByRange(Box::new(
SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
))))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRoot => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
)))),
Version::V1 => {
if raw_bytes.len() >= *SIGNED_BEACON_BLOCK_MIN
&& raw_bytes.len() <= *SIGNED_BEACON_BLOCK_MAX
{
Ok(Some(RPCResponse::BlocksByRoot(Box::new(
SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?,
))))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Ping => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(&raw_bytes)?,
}))),
Version::V1 => {
if raw_bytes.len() == <Ping as Encode>::ssz_fixed_len() {
Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(&raw_bytes)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::MetaData => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::MetaData(
MetaData::from_ssz_bytes(&raw_bytes)?,
))),
Version::V1 => {
if raw_bytes.len() == <MetaData<TSpec> as Encode>::ssz_fixed_len() {
Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes(
&raw_bytes,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
}
}
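// A minimal sketch of the length guard added throughout the codecs above: for a fixed-size
// SSZ message, any packet whose length is not exactly the type's fixed length is rejected
// before decoding is attempted, instead of letting the decoder read a partial or padded
// value. Plain little-endian u64 stands in for SSZ here; the error string is illustrative.
fn decode_fixed_u64(packet: &[u8]) -> Result<u64, &'static str> {
    if packet.len() != std::mem::size_of::<u64>() {
        return Err("invalid data: wrong length for a fixed-size message");
    }
    let mut buf = [0u8; 8];
    buf.copy_from_slice(packet);
    Ok(u64::from_le_bytes(buf))
}

#[test]
fn rejects_wrong_length_packets() {
    assert_eq!(decode_fixed_u64(&[1, 0, 0, 0, 0, 0, 0, 0]), Ok(1));
    assert!(decode_fixed_u64(&[1, 0, 0]).is_err());
    assert!(decode_fixed_u64(&[0; 9]).is_err());
}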

View File

@ -1,13 +1,17 @@
use crate::rpc::methods::*;
use crate::rpc::{
codec::base::OutboundCodec,
protocol::{Encoding, Protocol, ProtocolId, RPCError, Version},
protocol::{
Encoding, Protocol, ProtocolId, RPCError, Version, BLOCKS_BY_ROOT_REQUEST_MAX,
BLOCKS_BY_ROOT_REQUEST_MIN, SIGNED_BEACON_BLOCK_MAX, SIGNED_BEACON_BLOCK_MIN,
},
};
use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse};
use libp2p::bytes::BytesMut;
use snap::read::FrameDecoder;
use snap::write::FrameEncoder;
use ssz::{Decode, Encode};
use ssz_types::VariableList;
use std::io::Cursor;
use std::io::ErrorKind;
use std::io::{Read, Write};
@ -60,9 +64,9 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
RPCResponse::MetaData(res) => res.as_ssz_bytes(),
},
RPCCodedResponse::InvalidRequest(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::ServerError(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::Unknown(err) => err.into_bytes().as_ssz_bytes(),
RPCCodedResponse::InvalidRequest(err) => err.as_ssz_bytes(),
RPCCodedResponse::ServerError(err) => err.as_ssz_bytes(),
RPCCodedResponse::Unknown(err) => err.as_ssz_bytes(),
RPCCodedResponse::StreamTermination(_) => {
unreachable!("Code error - attempting to encode a stream termination")
}
@ -122,29 +126,63 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
let _read_bytes = src.split_to(n as usize);
match self.protocol.message_name {
Protocol::Status => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
&decoded_buffer,
)?))),
Version::V1 => {
if decoded_buffer.len() == <StatusMessage as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes(
&decoded_buffer,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Goodbye => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Goodbye(
GoodbyeReason::from_ssz_bytes(&decoded_buffer)?,
))),
Version::V1 => {
if decoded_buffer.len() == <GoodbyeReason as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
&decoded_buffer,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRange => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::BlocksByRange(
BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?,
))),
Version::V1 => {
if decoded_buffer.len()
== <BlocksByRangeRequest as Encode>::ssz_fixed_len()
{
Ok(Some(RPCRequest::BlocksByRange(
BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?,
)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRoot => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: Vec::from_ssz_bytes(&decoded_buffer)?,
}))),
Version::V1 => {
if decoded_buffer.len() >= *BLOCKS_BY_ROOT_REQUEST_MIN
&& decoded_buffer.len() <= *BLOCKS_BY_ROOT_REQUEST_MAX
{
Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest {
block_roots: VariableList::from_ssz_bytes(&decoded_buffer)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Ping => match self.protocol.version {
Version::V1 => Ok(Some(RPCRequest::Ping(Ping::from_ssz_bytes(
&decoded_buffer,
)?))),
Version::V1 => {
if decoded_buffer.len() == <Ping as Encode>::ssz_fixed_len() {
Ok(Some(RPCRequest::Ping(Ping {
data: u64::from_ssz_bytes(&decoded_buffer)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::MetaData => match self.protocol.version {
Version::V1 => {
@ -267,33 +305,65 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
let _read_bytes = src.split_to(n as usize);
match self.protocol.message_name {
Protocol::Status => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::Status(
StatusMessage::from_ssz_bytes(&decoded_buffer)?,
))),
Version::V1 => {
if decoded_buffer.len() == <StatusMessage as Encode>::ssz_fixed_len() {
Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
&decoded_buffer,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Goodbye => {
// Goodbye does not have a response
Err(RPCError::InvalidData)
}
Protocol::Goodbye => Err(RPCError::InvalidData),
Protocol::BlocksByRange => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
)))),
Version::V1 => {
if decoded_buffer.len() >= *SIGNED_BEACON_BLOCK_MIN
&& decoded_buffer.len() <= *SIGNED_BEACON_BLOCK_MAX
{
Ok(Some(RPCResponse::BlocksByRange(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
))))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::BlocksByRoot => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
)))),
Version::V1 => {
if decoded_buffer.len() >= *SIGNED_BEACON_BLOCK_MIN
&& decoded_buffer.len() <= *SIGNED_BEACON_BLOCK_MAX
{
Ok(Some(RPCResponse::BlocksByRoot(Box::new(
SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?,
))))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::Ping => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(&decoded_buffer)?,
}))),
Version::V1 => {
if decoded_buffer.len() == <Ping as Encode>::ssz_fixed_len() {
Ok(Some(RPCResponse::Pong(Ping {
data: u64::from_ssz_bytes(&decoded_buffer)?,
})))
} else {
Err(RPCError::InvalidData)
}
}
},
Protocol::MetaData => match self.protocol.version {
Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes(
&decoded_buffer,
)?))),
Version::V1 => {
if decoded_buffer.len() == <MetaData<TSpec> as Encode>::ssz_fixed_len()
{
Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes(
&decoded_buffer,
)?)))
} else {
Err(RPCError::InvalidData)
}
}
},
}
}

View File

@ -393,7 +393,7 @@ where
let err = HandlerErr::Inbound {
id: inbound_id,
proto: *protocol,
error: RPCError::ErrorResponse(*code, reason.clone()),
error: RPCError::ErrorResponse(*code, reason.to_string()),
};
self.pending_errors.push(err);
}
@ -1069,7 +1069,7 @@ where
Err(HandlerErr::Outbound {
id,
proto,
error: RPCError::ErrorResponse(code, r.clone()),
error: RPCError::ErrorResponse(code, r.to_string()),
})
}
};

View File

@ -3,8 +3,52 @@
use crate::types::EnrBitfield;
use serde::Serialize;
use ssz_derive::{Decode, Encode};
use ssz_types::{
typenum::{U1024, U256},
VariableList,
};
use std::ops::Deref;
use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// Maximum number of blocks in a single request.
pub type MaxRequestBlocks = U1024;
pub const MAX_REQUEST_BLOCKS: u64 = 1024;
/// Maximum length of error message.
type MaxErrorLen = U256;
/// Wrapper over an SSZ list used to represent error messages in RPC responses.
#[derive(Debug, Clone)]
pub struct ErrorType(VariableList<u8, MaxErrorLen>);
impl From<String> for ErrorType {
fn from(s: String) -> Self {
Self(VariableList::from(s.as_bytes().to_vec()))
}
}
impl From<&str> for ErrorType {
fn from(s: &str) -> Self {
Self(VariableList::from(s.as_bytes().to_vec()))
}
}
impl Deref for ErrorType {
type Target = VariableList<u8, MaxErrorLen>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ToString for ErrorType {
fn to_string(&self) -> String {
match std::str::from_utf8(self.0.deref()) {
Ok(s) => s.to_string(),
Err(_) => format!("{:?}", self.0.deref()), // Display raw bytes if not a UTF-8 string
}
}
}
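// A minimal sketch of what the `ErrorType` wrapper above provides: error strings travel as
// a length-capped byte list on the wire and are turned back into a `String` for display,
// falling back to the raw bytes when they are not valid UTF-8. A plain `Vec<u8>` with a
// manual cap stands in for `VariableList<u8, U256>`; the function names are illustrative.
const MAX_ERROR_LEN_BYTES: usize = 256;

fn encode_error(s: &str) -> Vec<u8> {
    s.as_bytes().iter().copied().take(MAX_ERROR_LEN_BYTES).collect()
}

fn decode_error(bytes: &[u8]) -> String {
    match std::str::from_utf8(bytes) {
        Ok(s) => s.to_string(),
        Err(_) => format!("{:?}", bytes), // display raw bytes if not a UTF-8 string
    }
}

#[test]
fn error_message_round_trip() {
    let bytes = encode_error("rate limited");
    assert_eq!(decode_error(&bytes), "rate limited");
}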
/* Request/Response data structures for RPC methods */
/* Requests */
@ -144,10 +188,10 @@ pub struct BlocksByRangeRequest {
}
/// Request a number of beacon block bodies from a peer.
#[derive(Clone, Debug, PartialEq)]
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlocksByRootRequest {
/// The list of beacon block bodies being requested.
pub block_roots: Vec<Hash256>,
pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
}
/* RPC Handling and Grouping */
@ -190,13 +234,13 @@ pub enum RPCCodedResponse<T: EthSpec> {
Success(RPCResponse<T>),
/// The response was invalid.
InvalidRequest(String),
InvalidRequest(ErrorType),
/// The response indicates a server error.
ServerError(String),
ServerError(ErrorType),
/// There was an unknown response.
Unknown(String),
Unknown(ErrorType),
/// Received a stream termination indicating which response is being terminated.
StreamTermination(ResponseTermination),
@ -233,18 +277,18 @@ impl<T: EthSpec> RPCCodedResponse<T> {
/// Builds an RPCCodedResponse from a response code and an ErrorMessage
pub fn from_error(response_code: u8, err: String) -> Self {
match response_code {
1 => RPCCodedResponse::InvalidRequest(err),
2 => RPCCodedResponse::ServerError(err),
_ => RPCCodedResponse::Unknown(err),
1 => RPCCodedResponse::InvalidRequest(err.into()),
2 => RPCCodedResponse::ServerError(err.into()),
_ => RPCCodedResponse::Unknown(err.into()),
}
}
/// Builds an RPCCodedResponse from a response code and an ErrorMessage
pub fn from_error_code(response_code: RPCResponseErrorCode, err: String) -> Self {
match response_code {
RPCResponseErrorCode::InvalidRequest => RPCCodedResponse::InvalidRequest(err),
RPCResponseErrorCode::ServerError => RPCCodedResponse::ServerError(err),
RPCResponseErrorCode::Unknown => RPCCodedResponse::Unknown(err),
RPCResponseErrorCode::InvalidRequest => RPCCodedResponse::InvalidRequest(err.into()),
RPCResponseErrorCode::ServerError => RPCCodedResponse::ServerError(err.into()),
RPCResponseErrorCode::Unknown => RPCCodedResponse::Unknown(err.into()),
}
}

View File

@ -23,8 +23,8 @@ pub(crate) use protocol::{RPCProtocol, RPCRequest};
pub use handler::SubstreamId;
pub use methods::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RPCResponseErrorCode, RequestId,
ResponseTermination, StatusMessage,
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks,
RPCResponseErrorCode, RequestId, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
};
pub use protocol::{Protocol, RPCError};

View File

@ -9,11 +9,14 @@ use crate::rpc::{
InboundCodec, OutboundCodec,
},
methods::ResponseTermination,
MaxRequestBlocks, MAX_REQUEST_BLOCKS,
};
use futures::future::Ready;
use futures::prelude::*;
use futures::prelude::{AsyncRead, AsyncWrite};
use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
use std::io;
use std::marker::PhantomData;
use std::pin::Pin;
@ -23,7 +26,38 @@ use tokio_util::{
codec::Framed,
compat::{Compat, FuturesAsyncReadCompatExt},
};
use types::EthSpec;
use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock};
lazy_static! {
// Note: we hardcode the `EthSpec` type for `SignedBeaconBlock` because the min/max values
// are the same across the different `EthSpec` implementations.
pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec> {
message: BeaconBlock::empty(&MainnetEthSpec::default_spec()),
signature: Signature::empty_signature(),
}
.as_ssz_bytes()
.len();
pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec> {
message: BeaconBlock::full(&MainnetEthSpec::default_spec()),
signature: Signature::empty_signature(),
}
.as_ssz_bytes()
.len();
pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = BlocksByRootRequest {
block_roots: VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new())
}
.as_ssz_bytes()
.len();
pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize = BlocksByRootRequest {
block_roots: VariableList::<Hash256, MaxRequestBlocks>::from(vec![
Hash256::zero();
MAX_REQUEST_BLOCKS
as usize
])
}
.as_ssz_bytes()
.len();
}
/// The maximum bytes that can be sent across the RPC.
const MAX_RPC_SIZE: usize = 1_048_576; // 1M

View File

@ -7,15 +7,11 @@ use crate::EnrExt;
use crate::{NetworkConfig, NetworkGlobals};
use futures::prelude::*;
use libp2p::core::{
identity::Keypair,
multiaddr::Multiaddr,
muxing::StreamMuxerBox,
transport::boxed::Boxed,
upgrade::{InboundUpgradeExt, OutboundUpgradeExt},
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::boxed::Boxed,
ConnectedPoint,
};
use libp2p::{
core, noise, secio,
core, noise,
swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
PeerId, Swarm, Transport,
};
@ -118,7 +114,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => discovery_string);
let mut swarm = {
// Set up the transport - tcp/ws with noise/secio and mplex/yamux
// Set up the transport - tcp/ws with noise and yamux/mplex
let transport = build_transport(local_keypair.clone())
.map_err(|e| format!("Failed to build transport: {:?}", e))?;
// Lighthouse network behaviour
@ -369,8 +365,8 @@ impl<TSpec: EthSpec> Service<TSpec> {
}
}
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise/secio as the encryption
/// layer, and mplex or yamux as the multiplexing layer.
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
/// yamux or mplex as the multiplexing layer.
fn build_transport(
local_private_key: Keypair,
) -> Result<Boxed<(PeerId, StreamMuxerBox), Error>, Error> {
@ -382,47 +378,18 @@ fn build_transport(
transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
};
// Authentication
let transport = transport
.and_then(move |stream, endpoint| {
let upgrade = core::upgrade::SelectUpgrade::new(
generate_noise_config(&local_private_key),
secio::SecioConfig::new(local_private_key),
);
core::upgrade::apply(stream, upgrade, endpoint, core::upgrade::Version::V1).and_then(
|out| async move {
match out {
// Noise was negotiated
core::either::EitherOutput::First((remote_id, out)) => {
Ok((core::either::EitherOutput::First(out), remote_id))
}
// Secio was negotiated
core::either::EitherOutput::Second((remote_id, out)) => {
Ok((core::either::EitherOutput::Second(out), remote_id))
}
}
},
)
})
.timeout(Duration::from_secs(20));
// Multiplexing
let transport = transport
.and_then(move |(stream, peer_id), endpoint| {
let peer_id2 = peer_id.clone();
let upgrade = core::upgrade::SelectUpgrade::new(
libp2p::yamux::Config::default(),
libp2p::mplex::MplexConfig::new(),
)
.map_inbound(move |muxer| (peer_id, muxer))
.map_outbound(move |muxer| (peer_id2, muxer));
core::upgrade::apply(stream, upgrade, endpoint, core::upgrade::Version::V1)
.map_ok(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer)))
})
Ok(transport
.upgrade(core::upgrade::Version::V1)
.authenticate(generate_noise_config(&local_private_key))
.multiplex(core::upgrade::SelectUpgrade::new(
libp2p::yamux::Config::default(),
libp2p::mplex::MplexConfig::new(),
))
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
.timeout(Duration::from_secs(20))
.timeout(Duration::from_secs(20))
.map_err(|err| Error::new(ErrorKind::Other, err))
.boxed();
Ok(transport)
.boxed())
}
// Useful helper functions for debugging. Currently not used in the client.

View File

@ -9,7 +9,7 @@ use std::boxed::Box;
use types::SubnetId;
use types::{
Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof,
SignedBeaconBlock, VoluntaryExit,
SignedBeaconBlock, SignedVoluntaryExit,
};
#[derive(Debug, Clone, PartialEq)]
@ -21,7 +21,7 @@ pub enum PubsubMessage<T: EthSpec> {
/// Gossipsub message providing notification of a raw un-aggregated attestation with its subnet id.
Attestation(Box<(SubnetId, Attestation<T>)>),
/// Gossipsub message providing notification of a voluntary exit.
VoluntaryExit(Box<VoluntaryExit>),
VoluntaryExit(Box<SignedVoluntaryExit>),
/// Gossipsub message providing notification of a new proposer slashing.
ProposerSlashing(Box<ProposerSlashing>),
/// Gossipsub message providing notification of a new attester slashing.
@ -41,7 +41,7 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock,
PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
PubsubMessage::Attestation(attestation_data) => {
GossipKind::CommitteeIndex(attestation_data.0)
GossipKind::Attestation(attestation_data.0)
}
PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit,
PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing,
@ -68,41 +68,37 @@ impl<T: EthSpec> PubsubMessage<T> {
continue;
}
Ok(gossip_topic) => {
let mut decompressed_data: Vec<u8> = Vec::new();
let data = match gossip_topic.encoding() {
// group each part by encoding type
let ref decompressed_data = match gossip_topic.encoding() {
GossipEncoding::SSZSnappy => {
// Exit early if uncompressed data is > GOSSIP_MAX_SIZE
match decompress_len(data) {
Ok(n) if n > GOSSIP_MAX_SIZE => {
return Err("ssz_snappy decoded data > GOSSIP_MAX_SIZE".into());
}
Ok(n) => decompressed_data.resize(n, 0),
Ok(_) => {}
Err(e) => {
return Err(format!("{}", e));
}
};
let mut decoder = Decoder::new();
match decoder.decompress(data, &mut decompressed_data) {
Ok(n) => {
decompressed_data.truncate(n);
&decompressed_data
}
match decoder.decompress_vec(data) {
Ok(decompressed_data) => decompressed_data,
Err(e) => return Err(format!("{}", e)),
}
}
GossipEncoding::SSZ => data,
};
// the ssz decoders
match gossip_topic.kind() {
GossipKind::BeaconAggregateAndProof => {
let agg_and_proof = SignedAggregateAndProof::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
let agg_and_proof =
SignedAggregateAndProof::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
agg_and_proof,
)));
}
GossipKind::CommitteeIndex(subnet_id) => {
let attestation = Attestation::from_ssz_bytes(data)
GossipKind::Attestation(subnet_id) => {
let attestation = Attestation::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::Attestation(Box::new((
*subnet_id,
@ -110,25 +106,28 @@ impl<T: EthSpec> PubsubMessage<T> {
))));
}
GossipKind::BeaconBlock => {
let beacon_block = SignedBeaconBlock::from_ssz_bytes(data)
let beacon_block = SignedBeaconBlock::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block)));
}
GossipKind::VoluntaryExit => {
let voluntary_exit = VoluntaryExit::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
let voluntary_exit =
SignedVoluntaryExit::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::VoluntaryExit(Box::new(voluntary_exit)));
}
GossipKind::ProposerSlashing => {
let proposer_slashing = ProposerSlashing::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
let proposer_slashing =
ProposerSlashing::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::ProposerSlashing(Box::new(
proposer_slashing,
)));
}
GossipKind::AttesterSlashing => {
let attester_slashing = AttesterSlashing::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
let attester_slashing =
AttesterSlashing::from_ssz_bytes(decompressed_data)
.map_err(|e| format!("{:?}", e))?;
return Ok(PubsubMessage::AttesterSlashing(Box::new(
attester_slashing,
)));
@ -152,13 +151,6 @@ impl<T: EthSpec> PubsubMessage<T> {
PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
};
match encoding {
GossipEncoding::SSZ => {
if data.len() > GOSSIP_MAX_SIZE {
return Err("ssz encoded data > GOSSIP_MAX_SIZE".into());
} else {
Ok(data)
}
}
GossipEncoding::SSZSnappy => {
let mut encoder = Encoder::new();
match encoder.compress_vec(&data) {

View File

@ -6,14 +6,10 @@ use types::SubnetId;
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX
// For example /eth2/beacon_block/ssz
pub const TOPIC_PREFIX: &str = "eth2";
pub const SSZ_ENCODING_POSTFIX: &str = "ssz";
pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
// for speed and easier string manipulation, committee topic index is split into a prefix and a
// postfix. The topic is committee_index{}_beacon_attestation where {} is an integer.
pub const COMMITEE_INDEX_TOPIC_PREFIX: &str = "committee_index";
pub const COMMITEE_INDEX_TOPIC_POSTFIX: &str = "_beacon_attestation";
pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
@ -39,7 +35,7 @@ pub enum GossipKind {
/// Topic for publishing aggregate attestations and proofs.
BeaconAggregateAndProof,
/// Topic for publishing raw attestations on a particular subnet.
CommitteeIndex(SubnetId),
Attestation(SubnetId),
/// Topic for publishing voluntary exits.
VoluntaryExit,
/// Topic for publishing block proposer slashings.
@ -53,7 +49,7 @@ impl std::fmt::Display for GossipKind {
match self {
GossipKind::BeaconBlock => write!(f, "beacon_block"),
GossipKind::BeaconAggregateAndProof => write!(f, "beacon_aggregate_and_proof"),
GossipKind::CommitteeIndex(subnet_id) => write!(f, "committee_index_{}", **subnet_id),
GossipKind::Attestation(subnet_id) => write!(f, "beacon_attestation_{}", **subnet_id),
GossipKind::VoluntaryExit => write!(f, "voluntary_exit"),
GossipKind::ProposerSlashing => write!(f, "proposer_slashing"),
GossipKind::AttesterSlashing => write!(f, "attester_slashing"),
@ -64,8 +60,6 @@ impl std::fmt::Display for GossipKind {
/// The known encoding types for gossipsub messages.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum GossipEncoding {
/// Messages are encoded with SSZ.
SSZ,
/// Messages are encoded with SSZSnappy.
SSZSnappy,
}
@ -117,7 +111,6 @@ impl GossipTopic {
fork_digest.copy_from_slice(&digest_bytes);
let encoding = match topic_parts[4] {
SSZ_ENCODING_POSTFIX => GossipEncoding::SSZ,
SSZ_SNAPPY_ENCODING_POSTFIX => GossipEncoding::SSZSnappy,
_ => return Err(format!("Unknown encoding: {}", topic)),
};
@ -128,7 +121,7 @@ impl GossipTopic {
PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing,
ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing,
topic => match committee_topic_index(topic) {
Some(subnet_id) => GossipKind::CommitteeIndex(subnet_id),
Some(subnet_id) => GossipKind::Attestation(subnet_id),
None => return Err(format!("Unknown topic: {}", topic)),
},
};
@ -153,7 +146,6 @@ impl Into<Topic> for GossipTopic {
impl Into<String> for GossipTopic {
fn into(self) -> String {
let encoding = match self.encoding {
GossipEncoding::SSZ => SSZ_ENCODING_POSTFIX,
GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX,
};
@ -163,10 +155,7 @@ impl Into<String> for GossipTopic {
GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(),
GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(),
GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(),
GossipKind::CommitteeIndex(index) => format!(
"{}{}{}",
COMMITEE_INDEX_TOPIC_PREFIX, *index, COMMITEE_INDEX_TOPIC_POSTFIX
),
GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,),
};
format!(
"/{}/{}/{}/{}",
@ -180,7 +169,7 @@ impl Into<String> for GossipTopic {
impl From<SubnetId> for GossipKind {
fn from(subnet_id: SubnetId) -> Self {
GossipKind::CommitteeIndex(subnet_id)
GossipKind::Attestation(subnet_id)
}
}
@ -188,17 +177,9 @@ impl From<SubnetId> for GossipKind {
// Determines if a string is a committee topic.
fn committee_topic_index(topic: &str) -> Option<SubnetId> {
if topic.starts_with(COMMITEE_INDEX_TOPIC_PREFIX)
&& topic.ends_with(COMMITEE_INDEX_TOPIC_POSTFIX)
{
if topic.starts_with(BEACON_ATTESTATION_PREFIX) {
return Some(SubnetId::new(
u64::from_str_radix(
topic
.trim_start_matches(COMMITEE_INDEX_TOPIC_PREFIX)
.trim_end_matches(COMMITEE_INDEX_TOPIC_POSTFIX),
10,
)
.ok()?,
u64::from_str_radix(topic.trim_start_matches(BEACON_ATTESTATION_PREFIX), 10).ok()?,
));
}
None
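
A standalone sketch of the new prefix-based parsing performed by committee_topic_index above, with a plain u64 standing in for SubnetId (illustrative only):

const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";

// "beacon_attestation_5" -> Some(5); anything else -> None.
fn parse_attestation_subnet(topic: &str) -> Option<u64> {
    if topic.starts_with(BEACON_ATTESTATION_PREFIX) {
        return topic
            .trim_start_matches(BEACON_ATTESTATION_PREFIX)
            .parse::<u64>()
            .ok();
    }
    None
}

fn main() {
    assert_eq!(parse_attestation_subnet("beacon_attestation_5"), Some(5));
    assert_eq!(parse_attestation_subnet("committee_index_5_beacon_attestation"), None);
}
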

View File

@ -1,184 +0,0 @@
#![cfg(test)]
use crate::behaviour::Behaviour;
use crate::multiaddr::Protocol;
use ::types::{EnrForkId, MinimalEthSpec};
use eth2_libp2p::discovery::{build_enr, CombinedKey, CombinedKeyExt};
use eth2_libp2p::*;
use futures::prelude::*;
use libp2p::core::identity::Keypair;
use libp2p::{
core,
core::{muxing::StreamMuxerBox, transport::boxed::Boxed},
secio,
swarm::{SwarmBuilder, SwarmEvent},
PeerId, Swarm, Transport,
};
use slog::{crit, debug, info, Level};
use std::io::{Error, ErrorKind};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
type TSpec = MinimalEthSpec;
mod common;
type Libp2pBehaviour = Behaviour<TSpec>;
/// Build and return a eth2_libp2p Swarm with only secio support.
fn build_secio_swarm(
config: &NetworkConfig,
log: slog::Logger,
) -> error::Result<Swarm<Libp2pBehaviour>> {
let local_keypair = Keypair::generate_secp256k1();
let local_peer_id = PeerId::from(local_keypair.public());
let enr_key = CombinedKey::from_libp2p(&local_keypair).unwrap();
let enr = build_enr::<TSpec>(&enr_key, config, EnrForkId::default()).unwrap();
let network_globals = Arc::new(NetworkGlobals::new(
enr,
config.libp2p_port,
config.discovery_port,
&log,
));
let mut swarm = {
// Set up the transport - tcp/ws with secio and mplex/yamux
let transport = build_secio_transport(local_keypair.clone());
// Lighthouse network behaviour
let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?;
// requires a tokio runtime
struct Executor(tokio::runtime::Handle);
impl libp2p::core::Executor for Executor {
fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
self.0.spawn(f);
}
}
SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
.executor(Box::new(Executor(tokio::runtime::Handle::current())))
.build()
};
// listen on the specified address
let listen_multiaddr = {
let mut m = Multiaddr::from(config.listen_address);
m.push(Protocol::Tcp(config.libp2p_port));
m
};
match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
Ok(_) => {
let mut log_address = listen_multiaddr;
log_address.push(Protocol::P2p(local_peer_id.clone().into()));
info!(log, "Listening established"; "address" => format!("{}", log_address));
}
Err(err) => {
crit!(
log,
"Unable to listen on libp2p address";
"error" => format!("{:?}", err),
"listen_multiaddr" => format!("{}", listen_multiaddr),
);
return Err("Libp2p was unable to listen on the given listen address.".into());
}
};
// helper closure for dialing peers
let mut dial_addr = |multiaddr: &Multiaddr| {
match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
Err(err) => debug!(
log,
"Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err)
),
};
};
// attempt to connect to any specified boot-nodes
for bootnode_enr in &config.boot_nodes {
for multiaddr in &bootnode_enr.multiaddr() {
// ignore udp multiaddr if it exists
let components = multiaddr.iter().collect::<Vec<_>>();
if let Protocol::Udp(_) = components[1] {
continue;
}
dial_addr(multiaddr);
}
}
Ok(swarm)
}
/// Build a simple TCP transport with secio, mplex/yamux.
fn build_secio_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> {
let transport = libp2p_tcp::TokioTcpConfig::new().nodelay(true);
transport
.upgrade(core::upgrade::Version::V1)
.authenticate(secio::SecioConfig::new(local_private_key))
.multiplex(core::upgrade::SelectUpgrade::new(
libp2p::yamux::Config::default(),
libp2p::mplex::MplexConfig::new(),
))
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
.timeout(Duration::from_secs(20))
.timeout(Duration::from_secs(20))
.map_err(|err| Error::new(ErrorKind::Other, err))
.boxed()
}
/// Test if the encryption falls back to secio if noise isn't available
#[tokio::test]
async fn test_secio_noise_fallback() {
// set up the logging. The level and enabled logging or not
let log_level = Level::Trace;
let enable_logging = false;
let log = common::build_log(log_level, enable_logging);
let port = common::unused_port("tcp").unwrap();
let noisy_config = common::build_config(port, vec![]);
let (_signal, exit) = exit_future::signal();
let executor =
environment::TaskExecutor::new(tokio::runtime::Handle::current(), exit, log.clone());
let mut noisy_node = Service::new(executor, &noisy_config, EnrForkId::default(), &log)
.expect("should build a libp2p instance")
.1;
let port = common::unused_port("tcp").unwrap();
let secio_config = common::build_config(port, vec![common::get_enr(&noisy_node)]);
// Building a custom Libp2pService from outside the crate isn't possible because of
// private fields in the Libp2pService struct. A swarm is good enough for testing
// compatibility with secio.
let mut secio_swarm =
build_secio_swarm(&secio_config, log.clone()).expect("should build a secio swarm");
let secio_log = log.clone();
let noisy_future = async {
loop {
noisy_node.next_event().await;
}
};
let secio_future = async {
loop {
match secio_swarm.next_event().await {
SwarmEvent::ConnectionEstablished { peer_id, .. } => {
// secio node negotiated a secio transport with
// the noise compatible node
info!(secio_log, "Connected to peer {}", peer_id);
return;
}
_ => {} // Ignore all other events
}
}
};
tokio::select! {
_ = noisy_future => {}
_ = secio_future => {}
_ = tokio::time::delay_for(Duration::from_millis(800)) => {
panic!("Future timed out");
}
}
}

View File

@ -2,6 +2,7 @@
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::{BehaviourEvent, Libp2pEvent, Request, Response};
use slog::{debug, warn, Level};
use ssz_types::VariableList;
use std::time::Duration;
use tokio::time::delay_for;
use types::{
@ -467,11 +468,11 @@ async fn test_blocks_by_root_chunked_rpc() {
// BlocksByRoot Request
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: vec![
block_roots: VariableList::from(vec![
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
],
]),
});
// BlocksByRoot Response
@ -579,7 +580,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
// BlocksByRoot Request
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: vec![
block_roots: VariableList::from(vec![
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
@ -590,7 +591,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0),
],
]),
});
// BlocksByRoot Response
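
The request's block_roots field is now a length-bounded VariableList rather than a plain Vec. A tiny self-contained example of the same ssz_types container, with an assumed bound of four elements standing in for the real MaxRequestBlocks type:

use ssz_types::{typenum::U4, VariableList};

fn main() {
    // Bound of 4 chosen only for the example.
    let mut roots: VariableList<u64, U4> = VariableList::from(vec![1, 2, 3]);
    assert_eq!(roots.len(), 3);
    // A fourth element still fits within the bound.
    assert!(roots.push(4).is_ok());
    // A fifth element exceeds the bound and is rejected.
    assert!(roots.push(5).is_err());
}
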

View File

@ -43,7 +43,7 @@ impl Eth1GenesisService {
/// Creates a new service. Does not attempt to connect to the Eth1 node.
///
/// Modifies the given `config` to make it more suitable to the task of listening to genesis.
pub fn new(config: Eth1Config, log: Logger) -> Self {
pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
let config = Eth1Config {
// Truncating the block cache makes searching for genesis more
// complicated.
@ -65,7 +65,7 @@ impl Eth1GenesisService {
};
Self {
eth1_service: Eth1Service::new(config, log),
eth1_service: Eth1Service::new(config, log, spec),
stats: Arc::new(Statistics {
highest_processed_block: AtomicU64::new(0),
active_validator_count: AtomicUsize::new(0),
@ -161,7 +161,7 @@ impl Eth1GenesisService {
log,
"Imported eth1 blocks";
"latest_block_timestamp" => eth1_service.latest_block_timestamp(),
"cache_head" => self.highest_safe_block(),
"cache_head" => eth1_service.highest_safe_block(),
"count" => outcome.blocks_imported,
);
outcome.blocks_imported
@ -205,15 +205,16 @@ impl Eth1GenesisService {
log,
"Waiting for more validators";
"min_genesis_active_validators" => spec.min_genesis_active_validator_count,
"total_deposits" => total_deposit_count,
"active_validators" => active_validator_count,
"total_deposits" => total_deposit_count,
"valid_deposits" => eth1_service.get_valid_signature_count().unwrap_or(0),
);
}
} else {
info!(
log,
"Waiting for adequate eth1 timestamp";
"ming_genesis_delay" => spec.min_genesis_delay,
"genesis_delay" => spec.genesis_delay,
"genesis_time" => spec.min_genesis_time,
"latest_eth1_timestamp" => latest_timestamp,
);
@ -255,7 +256,10 @@ impl Eth1GenesisService {
//
// Don't update the highest processed block since we want to come back and process this
// again later.
if self.highest_safe_block().map_or(true, |n| block.number > n) {
if eth1_service
.highest_safe_block()
.map_or(true, |n| block.number > n)
{
continue;
}
@ -279,7 +283,7 @@ impl Eth1GenesisService {
trace!(
log,
"Insufficient block timestamp";
"min_genesis_delay" => spec.min_genesis_delay,
"genesis_delay" => spec.genesis_delay,
"min_genesis_time" => spec.min_genesis_time,
"eth1_block_timestamp" => block.timestamp,
"eth1_block_number" => block.number,
@ -287,6 +291,21 @@ impl Eth1GenesisService {
continue;
}
let valid_signature_count = eth1_service
.get_valid_signature_count_at_block(block.number)
.unwrap_or(0);
if (valid_signature_count as u64) < spec.min_genesis_active_validator_count {
trace!(
log,
"Insufficient valid signatures";
"genesis_delay" => spec.genesis_delay,
"valid_signature_count" => valid_signature_count,
"min_validator_count" => spec.min_genesis_active_validator_count,
"eth1_block_number" => block.number,
);
continue;
}
// Generate a potential beacon state for this eth1 block.
//
// Note: this state is fully valid, some fields have been bypassed to make verification
@ -416,14 +435,6 @@ impl Eth1GenesisService {
Ok(state)
}
/// Returns the highest block that is present in both the deposit and block caches.
fn highest_safe_block(&self) -> Option<u64> {
let block_cache = self.eth1_service.blocks().read().highest_block_number()?;
let deposit_cache = self.eth1_service.deposits().read().last_processed_block?;
Some(std::cmp::min(block_cache, deposit_cache))
}
/// Returns all deposit logs included in `block_number` and all prior blocks.
fn deposit_logs_at_block(&self, block_number: u64) -> Vec<DepositLog> {
self.eth1_service
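
Taken together, the changes above gate each candidate eth1 block on cache coverage, timestamp and valid deposit signatures before a genesis state is attempted. A rough sketch of that gating with plain integers, assuming the v0.12 rule genesis_time = eth1_timestamp + GENESIS_DELAY (an assumption about the check, not the exact Lighthouse code):

fn worth_attempting_genesis(
    block_number: u64,
    block_timestamp: u64,
    highest_safe_block: Option<u64>,
    min_genesis_time: u64,
    genesis_delay: u64,
    valid_signature_count: u64,
    min_genesis_active_validator_count: u64,
) -> bool {
    // Only consider blocks covered by both the block and deposit caches.
    let safe = highest_safe_block.map_or(false, |n| block_number <= n);
    // The timestamp must be large enough to reach MIN_GENESIS_TIME once the
    // genesis delay is added (assumed reading of the "insufficient timestamp" check).
    let old_enough = block_timestamp + genesis_delay >= min_genesis_time;
    // Enough valid deposit signatures to reach the active-validator threshold.
    let enough_deposits = valid_signature_count >= min_genesis_active_validator_count;
    safe && old_enough && enough_deposits
}

fn main() {
    assert!(worth_attempting_genesis(
        10, 1_600_000_000, Some(12), 1_600_000_100, 604_800, 16_384, 16_384
    ));
    // A block beyond the highest safe block is skipped.
    assert!(!worth_attempting_genesis(
        13, 1_600_000_000, Some(12), 1_600_000_100, 604_800, 16_384, 16_384
    ));
}
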

View File

@ -53,6 +53,7 @@ fn basic() {
..Eth1Config::default()
},
log,
spec.clone(),
);
// NOTE: this test is sensitive to the response speed of the external web3 server. If

View File

@ -19,10 +19,12 @@ eth2_libp2p = { path = "../eth2_libp2p" }
hashset_delay = { path = "../../common/hashset_delay" }
rest_types = { path = "../../common/rest_types" }
types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" }
slot_clock = { path = "../../common/slot_clock" }
slog = { version = "2.5.2", features = ["max_level_trace"] }
hex = "0.4.2"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../../consensus/ssz_types" }
tree_hash = "0.1.0"
futures = "0.3.5"
error-chain = "0.12.2"

View File

@ -2,13 +2,14 @@
//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and
//! determines whether attestations should be aggregated and/or passed to the beacon node.
use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::{types::GossipKind, MessageId, NetworkGlobals, PeerId};
use eth2_libp2p::{types::GossipKind, NetworkGlobals};
use futures::prelude::*;
use hashset_delay::HashSetDelay;
use rand::seq::SliceRandom;
use rest_types::ValidatorSubscription;
use slog::{crit, debug, error, o, warn};
use slog::{crit, debug, error, o, trace, warn};
use slot_clock::SlotClock;
use std::collections::VecDeque;
use std::pin::Pin;
@ -189,18 +190,35 @@ impl<T: BeaconChainTypes> AttestationService<T> {
pub fn validator_subscriptions(
&mut self,
subscriptions: Vec<ValidatorSubscription>,
) -> Result<(), ()> {
) -> Result<(), String> {
for subscription in subscriptions {
metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS);
//NOTE: We assume all subscriptions have been verified before reaching this service
// Registers the validator with the attestation service.
// This will subscribe to long-lived random subnets if required.
trace!(self.log,
"Validator subscription";
"subscription" => format!("{:?}", subscription),
);
self.add_known_validator(subscription.validator_index);
let subnet_id = SubnetId::new(
subscription.attestation_committee_index
% self.beacon_chain.spec.attestation_subnet_count,
);
let subnet_id = match SubnetId::compute_subnet::<T::EthSpec>(
subscription.slot,
subscription.attestation_committee_index,
subscription.committee_count_at_slot,
&self.beacon_chain.spec,
) {
Ok(subnet_id) => subnet_id,
Err(e) => {
warn!(self.log,
"Failed to compute subnet id for validator subscription";
"error" => format!("{:?}", e),
"validator_index" => subscription.validator_index
);
continue;
}
};
let exact_subnet = ExactSubnet {
subnet_id,
@ -221,10 +239,20 @@ impl<T: BeaconChainTypes> AttestationService<T> {
// TODO: Implement
if subscription.is_aggregator {
metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS);
// set the subscription timer to subscribe to the next subnet if required
if let Err(e) = self.subscribe_to_subnet(exact_subnet) {
warn!(self.log, "Subscription to subnet error"; "error" => e);
return Err(());
if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) {
warn!(self.log,
"Subscription to subnet error";
"error" => e,
"validator_index" => subscription.validator_index,
);
} else {
trace!(self.log,
"Subscribed to subnet for aggregator duties";
"exact_subnet" => format!("{:?}", exact_subnet),
"validator_index" => subscription.validator_index
);
}
}
}
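
The subnet is no longer a simple committee_index modulo the subnet count; SubnetId::compute_subnet derives it from the slot, the committee index and the committee count at that slot. A standalone sketch of the v0.12 formula with plain u64s (constants are assumptions matching the usual defaults, not imported from Lighthouse):

const ATTESTATION_SUBNET_COUNT: u64 = 64;

fn compute_subnet(
    slot: u64,
    committee_index: u64,
    committees_per_slot: u64,
    slots_per_epoch: u64,
) -> u64 {
    let slots_since_epoch_start = slot % slots_per_epoch;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // With one committee per slot, the subnet tracks the slot within the epoch.
    assert_eq!(compute_subnet(0, 0, 1, 32), 0);
    assert_eq!(compute_subnet(5, 0, 1, 32), 5);
    // With four committees per slot, slot 2 committee 1 lands on subnet 9.
    assert_eq!(compute_subnet(2, 1, 4, 32), 9);
}
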
@ -235,25 +263,9 @@ impl<T: BeaconChainTypes> AttestationService<T> {
/// verification, re-propagates and returns false.
pub fn should_process_attestation(
&mut self,
_message_id: &MessageId,
peer_id: &PeerId,
subnet: SubnetId,
attestation: &Attestation<T::EthSpec>,
) -> bool {
// verify the attestation is on the correct subnet
let expected_subnet = match attestation.subnet_id(&self.beacon_chain.spec) {
Ok(v) => v,
Err(e) => {
warn!(self.log, "Could not obtain attestation subnet_id"; "error" => format!("{:?}", e));
return false;
}
};
if expected_subnet != subnet {
warn!(self.log, "Received an attestation on the wrong subnet"; "subnet_received" => format!("{:?}", subnet), "subnet_expected" => format!("{:?}",expected_subnet), "peer_id" => format!("{}", peer_id));
return false;
}
let exact_subnet = ExactSubnet {
subnet_id: subnet.clone(),
slot: attestation.data.slot,
@ -514,7 +526,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
self.random_subnets.insert(subnet_id);
// if we are not already subscribed, then subscribe
let topic_kind = &GossipKind::CommitteeIndex(subnet_id);
let topic_kind = &GossipKind::Attestation(subnet_id);
let already_subscribed = self
.network_globals
@ -577,7 +589,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
// we are also not un-subscribing from a subnet if the next slot requires us to be
// subscribed. Therefore there could be the case that we are already still subscribed
// to the required subnet. In which case we do not issue another subscription request.
let topic_kind = &GossipKind::CommitteeIndex(exact_subnet.subnet_id);
let topic_kind = &GossipKind::Attestation(exact_subnet.subnet_id);
if self
.network_globals
.gossipsub_subscriptions

View File

@ -69,8 +69,6 @@ mod tests {
Duration::from_secs(recent_genesis_time()),
Duration::from_millis(SLOT_DURATION_MILLIS),
))
.reduced_tree_fork_choice()
.expect("should add fork choice to builder")
.build()
.expect("should build"),
);
@ -110,17 +108,23 @@ mod tests {
validator_index: u64,
attestation_committee_index: CommitteeIndex,
slot: Slot,
committee_count_at_slot: u64,
) -> ValidatorSubscription {
let is_aggregator = true;
ValidatorSubscription {
validator_index,
attestation_committee_index,
slot,
committee_count_at_slot,
is_aggregator,
}
}
fn _get_subscriptions(validator_count: u64, slot: Slot) -> Vec<ValidatorSubscription> {
fn _get_subscriptions(
validator_count: u64,
slot: Slot,
committee_count_at_slot: u64,
) -> Vec<ValidatorSubscription> {
let mut subscriptions: Vec<ValidatorSubscription> = Vec::new();
for validator_index in 0..validator_count {
let is_aggregator = true;
@ -128,6 +132,7 @@ mod tests {
validator_index,
attestation_committee_index: validator_index,
slot,
committee_count_at_slot,
is_aggregator,
});
}
@ -169,6 +174,7 @@ mod tests {
let committee_index = 1;
let subscription_slot = 0;
let no_events_expected = 4;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -182,6 +188,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -190,7 +197,15 @@ mod tests {
.unwrap();
// not enough time for peer discovery, just subscribe
let expected = vec![AttServiceMessage::Subscribe(SubnetId::new(validator_index))];
let expected = vec![AttServiceMessage::Subscribe(
SubnetId::compute_subnet::<MinimalEthSpec>(
current_slot + Slot::new(subscription_slot),
committee_index,
committee_count,
&attestation_service.beacon_chain.spec,
)
.unwrap(),
)];
let events = get_events(attestation_service, no_events_expected, 1).await;
assert_matches!(
@ -217,6 +232,7 @@ mod tests {
let committee_index = 1;
let subscription_slot = 0;
let no_events_expected = 5;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -230,6 +246,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -238,9 +255,16 @@ mod tests {
.unwrap();
// not enough time for peer discovery, just subscribe, unsubscribe
let subnet_id = SubnetId::compute_subnet::<MinimalEthSpec>(
current_slot + Slot::new(subscription_slot),
committee_index,
committee_count,
&attestation_service.beacon_chain.spec,
)
.unwrap();
let expected = vec![
AttServiceMessage::Subscribe(SubnetId::new(validator_index)),
AttServiceMessage::Unsubscribe(SubnetId::new(validator_index)),
AttServiceMessage::Subscribe(subnet_id),
AttServiceMessage::Unsubscribe(subnet_id),
];
let events = get_events(attestation_service, no_events_expected, 2).await;
@ -268,6 +292,7 @@ mod tests {
let committee_index = 1;
let subscription_slot = 5;
let no_events_expected = 4;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -281,6 +306,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -297,10 +323,14 @@ mod tests {
);
// just discover peers, don't subscribe yet
let expected = vec![AttServiceMessage::DiscoverPeers {
subnet_id: SubnetId::new(validator_index),
min_ttl,
}];
let subnet_id = SubnetId::compute_subnet::<MinimalEthSpec>(
current_slot + Slot::new(subscription_slot),
committee_index,
committee_count,
&attestation_service.beacon_chain.spec,
)
.unwrap();
let expected = vec![AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }];
let events = get_events(attestation_service, no_events_expected, 1).await;
assert_matches!(
@ -327,6 +357,7 @@ mod tests {
let committee_index = 1;
let subscription_slot = 5;
let no_events_expected = 5;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -340,6 +371,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -356,12 +388,16 @@ mod tests {
);
// we should discover peers, wait, then subscribe
let subnet_id = SubnetId::compute_subnet::<MinimalEthSpec>(
current_slot + Slot::new(subscription_slot),
committee_index,
committee_count,
&attestation_service.beacon_chain.spec,
)
.unwrap();
let expected = vec![
AttServiceMessage::DiscoverPeers {
subnet_id: SubnetId::new(validator_index),
min_ttl,
},
AttServiceMessage::Subscribe(SubnetId::new(validator_index)),
AttServiceMessage::DiscoverPeers { subnet_id, min_ttl },
AttServiceMessage::Subscribe(subnet_id),
];
let events = get_events(attestation_service, no_events_expected, 5).await;
@ -389,6 +425,7 @@ mod tests {
let committee_index = 1;
let subscription_slot = 7;
let no_events_expected = 3;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -402,6 +439,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -438,9 +476,11 @@ mod tests {
let committee_index = 1;
let subscription_slot = 10;
let no_events_expected = 4;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
let current_slot = attestation_service
.beacon_chain
.slot_clock
@ -451,6 +491,7 @@ mod tests {
validator_index,
committee_index,
current_slot + Slot::new(subscription_slot),
committee_count,
)];
// submit the subscriptions
@ -466,11 +507,17 @@ mod tests {
.unwrap(),
);
let subnet_id = SubnetId::compute_subnet::<MinimalEthSpec>(
current_slot + Slot::new(subscription_slot),
committee_index,
committee_count,
&attestation_service.beacon_chain.spec,
)
.unwrap();
// expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range
let expected: Vec<AttServiceMessage> = vec![AttServiceMessage::DiscoverPeers {
subnet_id: SubnetId::new(validator_index),
min_ttl,
}];
let expected: Vec<AttServiceMessage> =
vec![AttServiceMessage::DiscoverPeers { subnet_id, min_ttl }];
let events = get_events(attestation_service, no_events_expected, 5).await;
@ -496,6 +543,7 @@ mod tests {
// subscribe 10 slots ahead so we do not produce any exact subnet messages
let subscription_slot = 10;
let subscription_count = 64;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -505,8 +553,11 @@ mod tests {
.now()
.expect("Could not get current slot");
let subscriptions =
_get_subscriptions(subscription_count, current_slot + subscription_slot);
let subscriptions = _get_subscriptions(
subscription_count,
current_slot + subscription_slot,
committee_count,
);
// submit the subscriptions
attestation_service
@ -544,6 +595,7 @@ mod tests {
let subscription_slot = 10;
// the 65th subscription should result in no more messages than the previous scenario
let subscription_count = 65;
let committee_count = 1;
// create the attestation service and subscriptions
let mut attestation_service = get_attestation_service();
@ -553,8 +605,11 @@ mod tests {
.now()
.expect("Could not get current slot");
let subscriptions =
_get_subscriptions(subscription_count, current_slot + subscription_slot);
let subscriptions = _get_subscriptions(
subscription_count,
current_slot + subscription_slot,
committee_count,
);
// submit the subscriptions
attestation_service

View File

@ -36,4 +36,16 @@ lazy_static! {
"network_gossip_aggregated_attestations_tx_total",
"Count of gossip aggregated attestations transmitted"
);
/*
* Attestation subnet subscriptions
*/
pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result<IntCounter> = try_create_int_counter(
"network_subnet_subscriptions_total",
"Count of validator subscription requests."
);
pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result<IntCounter> = try_create_int_counter(
"network_subnet_subscriptions_aggregator_total",
"Count of validator subscription requests where the subscriber is an aggregator."
);
}
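
These counters follow the usual pattern of wrapping the prometheus crate behind try_create_int_counter and inc_counter. A rough equivalent using prometheus directly (the registry handling here is illustrative, not how Lighthouse wires its global registry):

use prometheus::{IntCounter, Registry};

fn main() {
    let registry = Registry::new();
    let subscriptions = IntCounter::new(
        "network_subnet_subscriptions_total",
        "Count of validator subscription requests.",
    )
    .unwrap();
    registry.register(Box::new(subscriptions.clone())).unwrap();
    // Each validator subscription request bumps the counter once.
    subscriptions.inc();
    assert_eq!(subscriptions.get(), 1);
}
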

View File

@ -232,6 +232,7 @@ impl<T: BeaconChainTypes> Router<T> {
self.processor.verify_unaggregated_attestation_for_gossip(
peer_id.clone(),
subnet_attestation.1.clone(),
subnet_attestation.0,
)
{
self.propagate_message(id, peer_id.clone());
@ -254,23 +255,45 @@ impl<T: BeaconChainTypes> Router<T> {
}
}
}
PubsubMessage::VoluntaryExit(_exit) => {
// TODO: Apply more sophisticated validation
self.propagate_message(id, peer_id.clone());
// TODO: Handle exits
debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) );
PubsubMessage::VoluntaryExit(exit) => {
debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id));
if let Some(verified_exit) = self
.processor
.verify_voluntary_exit_for_gossip(&peer_id, *exit)
{
self.propagate_message(id, peer_id.clone());
self.processor.import_verified_voluntary_exit(verified_exit);
}
}
PubsubMessage::ProposerSlashing(_proposer_slashing) => {
// TODO: Apply more sophisticated validation
self.propagate_message(id, peer_id.clone());
// TODO: Handle proposer slashings
debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) );
PubsubMessage::ProposerSlashing(proposer_slashing) => {
debug!(
self.log,
"Received a proposer slashing";
"peer_id" => format!("{}", peer_id)
);
if let Some(verified_proposer_slashing) = self
.processor
.verify_proposer_slashing_for_gossip(&peer_id, *proposer_slashing)
{
self.propagate_message(id, peer_id.clone());
self.processor
.import_verified_proposer_slashing(verified_proposer_slashing);
}
}
PubsubMessage::AttesterSlashing(_attester_slashing) => {
// TODO: Apply more sophisticated validation
self.propagate_message(id, peer_id.clone());
// TODO: Handle attester slashings
debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) );
PubsubMessage::AttesterSlashing(attester_slashing) => {
debug!(
self.log,
"Received a attester slashing";
"peer_id" => format!("{}", peer_id)
);
if let Some(verified_attester_slashing) = self
.processor
.verify_attester_slashing_for_gossip(&peer_id, *attester_slashing)
{
self.propagate_message(id, peer_id.clone());
self.processor
.import_verified_attester_slashing(verified_attester_slashing);
}
}
}
}
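
Each of the three new arms follows the same verify-then-import shape: propagate and import only when verification yields a fresh result. A minimal generic sketch of that flow, with an enum mirroring beacon_chain's ObservationOutcome (the rest is illustrative, not the Lighthouse API):

enum ObservationOutcome<T> {
    New(T),
    AlreadyKnown,
}

fn handle_gossip_op<T>(
    outcome: Result<ObservationOutcome<T>, String>,
    propagate: impl FnOnce(),
    import: impl FnOnce(T),
) {
    match outcome {
        // Fresh and valid: re-gossip to peers, then hand to the op pool.
        Ok(ObservationOutcome::New(verified)) => {
            propagate();
            import(verified);
        }
        // Duplicate or invalid: drop it (the real code logs the reason at debug level).
        Ok(ObservationOutcome::AlreadyKnown) | Err(_) => {}
    }
}

fn main() {
    // Fresh operation: both hooks run.
    handle_gossip_op(
        Ok(ObservationOutcome::New(42u64)),
        || println!("propagated"),
        |v| println!("imported {}", v),
    );
    // Already-known operation: nothing happens.
    handle_gossip_op(
        Ok(ObservationOutcome::AlreadyKnown),
        || unreachable!(),
        |_: u64| unreachable!(),
    );
}
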

View File

@ -2,21 +2,24 @@ use crate::service::NetworkMessage;
use crate::sync::{PeerSyncInfo, SyncMessage};
use beacon_chain::{
attestation_verification::{
Error as AttnError, IntoForkChoiceVerifiedAttestation, VerifiedAggregatedAttestation,
Error as AttnError, SignatureVerifiedAttestation, VerifiedAggregatedAttestation,
VerifiedUnaggregatedAttestation,
},
BeaconChain, BeaconChainTypes, BlockError, BlockProcessingOutcome, GossipVerifiedBlock,
observed_operations::ObservationOutcome,
BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProcessingOutcome,
ForkChoiceError, GossipVerifiedBlock,
};
use eth2_libp2p::rpc::*;
use eth2_libp2p::{NetworkGlobals, PeerId, PeerRequestId, Request, Response};
use itertools::process_results;
use slog::{debug, error, o, trace, warn};
use ssz::Encode;
use state_processing::SigVerifiedOp;
use std::sync::Arc;
use tokio::sync::mpsc;
use types::{
Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock,
Slot,
Attestation, AttesterSlashing, ChainSpec, Epoch, EthSpec, Hash256, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId,
};
//TODO: Rate limit requests
@ -322,7 +325,7 @@ impl<T: BeaconChainTypes> Processor<T> {
&mut self,
peer_id: PeerId,
request_id: PeerRequestId,
req: BlocksByRangeRequest,
mut req: BlocksByRangeRequest,
) {
debug!(
self.log,
@ -333,6 +336,10 @@ impl<T: BeaconChainTypes> Processor<T> {
"step" => req.step,
);
// Should not send more than max request blocks
if req.count > MAX_REQUEST_BLOCKS {
req.count = MAX_REQUEST_BLOCKS;
}
if req.step == 0 {
warn!(self.log,
"Peer sent invalid range request";
@ -753,6 +760,18 @@ impl<T: BeaconChainTypes> Processor<T> {
* The peer has published an invalid consensus message.
*/
}
AttnError::InvalidSubnetId { received, expected } => {
/*
* The attestation was received on an incorrect subnet id.
*/
debug!(
self.log,
"Received attestation on incorrect subnet";
"expected" => format!("{:?}", expected),
"received" => format!("{:?}", received),
)
}
AttnError::Invalid(_) => {
/*
* The attestation failed the state_processing verification.
@ -828,12 +847,13 @@ impl<T: BeaconChainTypes> Processor<T> {
&mut self,
peer_id: PeerId,
unaggregated_attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId,
) -> Option<VerifiedUnaggregatedAttestation<T>> {
// This is provided to the error handling function to assist with debugging.
let beacon_block_root = unaggregated_attestation.data.beacon_block_root;
self.chain
.verify_unaggregated_attestation_for_gossip(unaggregated_attestation)
.verify_unaggregated_attestation_for_gossip(unaggregated_attestation, subnet_id)
.map_err(|e| {
self.handle_attestation_verification_failure(
peer_id,
@ -886,16 +906,163 @@ impl<T: BeaconChainTypes> Processor<T> {
&self,
peer_id: PeerId,
beacon_block_root: Hash256,
attestation: &'a impl IntoForkChoiceVerifiedAttestation<'a, T>,
attestation: &'a impl SignatureVerifiedAttestation<T>,
) {
if let Err(e) = self.chain.apply_attestation_to_fork_choice(attestation) {
debug!(
self.log,
"Attestation invalid for fork choice";
"reason" => format!("{:?}", e),
"peer" => format!("{:?}", peer_id),
"beacon_block_root" => format!("{:?}", beacon_block_root)
)
match e {
BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => {
debug!(
self.log,
"Attestation invalid for fork choice";
"reason" => format!("{:?}", e),
"peer" => format!("{:?}", peer_id),
"beacon_block_root" => format!("{:?}", beacon_block_root)
)
}
e => error!(
self.log,
"Error applying attestation to fork choice";
"reason" => format!("{:?}", e),
"peer" => format!("{:?}", peer_id),
"beacon_block_root" => format!("{:?}", beacon_block_root)
),
}
}
}
/// Verify a voluntary exit before gossiping or processing it.
///
/// Errors are logged at debug level.
pub fn verify_voluntary_exit_for_gossip(
&self,
peer_id: &PeerId,
voluntary_exit: SignedVoluntaryExit,
) -> Option<SigVerifiedOp<SignedVoluntaryExit>> {
let validator_index = voluntary_exit.message.validator_index;
match self.chain.verify_voluntary_exit_for_gossip(voluntary_exit) {
Ok(ObservationOutcome::New(sig_verified_exit)) => Some(sig_verified_exit),
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
self.log,
"Dropping exit for already exiting validator";
"validator_index" => validator_index,
"peer" => format!("{:?}", peer_id)
);
None
}
Err(e) => {
debug!(
self.log,
"Dropping invalid exit";
"validator_index" => validator_index,
"peer" => format!("{:?}", peer_id),
"error" => format!("{:?}", e)
);
None
}
}
}
/// Import a verified exit into the op pool.
pub fn import_verified_voluntary_exit(
&self,
verified_voluntary_exit: SigVerifiedOp<SignedVoluntaryExit>,
) {
self.chain.import_voluntary_exit(verified_voluntary_exit);
debug!(self.log, "Successfully imported voluntary exit");
}
/// Verify a proposer slashing before gossiping or processing it.
///
/// Errors are logged at debug level.
pub fn verify_proposer_slashing_for_gossip(
&self,
peer_id: &PeerId,
proposer_slashing: ProposerSlashing,
) -> Option<SigVerifiedOp<ProposerSlashing>> {
let validator_index = proposer_slashing.signed_header_1.message.proposer_index;
match self
.chain
.verify_proposer_slashing_for_gossip(proposer_slashing)
{
Ok(ObservationOutcome::New(verified_slashing)) => Some(verified_slashing),
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
self.log,
"Dropping proposer slashing";
"reason" => "Already seen a proposer slashing for that validator",
"validator_index" => validator_index,
"peer" => format!("{:?}", peer_id)
);
None
}
Err(e) => {
debug!(
self.log,
"Dropping invalid proposer slashing";
"validator_index" => validator_index,
"peer" => format!("{:?}", peer_id),
"error" => format!("{:?}", e)
);
None
}
}
}
/// Import a verified proposer slashing into the op pool.
pub fn import_verified_proposer_slashing(
&self,
proposer_slashing: SigVerifiedOp<ProposerSlashing>,
) {
self.chain.import_proposer_slashing(proposer_slashing);
debug!(self.log, "Successfully imported proposer slashing");
}
/// Verify an attester slashing before gossiping or processing it.
///
/// Errors are logged at debug level.
pub fn verify_attester_slashing_for_gossip(
&self,
peer_id: &PeerId,
attester_slashing: AttesterSlashing<T::EthSpec>,
) -> Option<SigVerifiedOp<AttesterSlashing<T::EthSpec>>> {
match self
.chain
.verify_attester_slashing_for_gossip(attester_slashing)
{
Ok(ObservationOutcome::New(verified_slashing)) => Some(verified_slashing),
Ok(ObservationOutcome::AlreadyKnown) => {
debug!(
self.log,
"Dropping attester slashing";
"reason" => "Slashings already known for all slashed validators",
"peer" => format!("{:?}", peer_id)
);
None
}
Err(e) => {
debug!(
self.log,
"Dropping invalid attester slashing";
"peer" => format!("{:?}", peer_id),
"error" => format!("{:?}", e)
);
None
}
}
}
/// Import a verified attester slashing into the op pool.
pub fn import_verified_attester_slashing(
&self,
attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>>,
) {
if let Err(e) = self.chain.import_attester_slashing(attester_slashing) {
debug!(self.log, "Error importing attester slashing"; "error" => format!("{:?}", e));
} else {
debug!(self.log, "Successfully imported attester slashing");
}
}
}

View File

@ -14,7 +14,7 @@ use eth2_libp2p::{
use eth2_libp2p::{BehaviourEvent, MessageId, NetworkGlobals, PeerId};
use futures::prelude::*;
use rest_types::ValidatorSubscription;
use slog::{debug, error, info, o, trace};
use slog::{debug, error, info, o, trace, warn};
use std::sync::Arc;
use std::time::Duration;
use store::HotColdDB;
@ -207,10 +207,11 @@ fn spawn_service<T: BeaconChainTypes>(
);
}
NetworkMessage::Subscribe { subscriptions } => {
// the result is dropped as it used solely for ergonomics
let _ = service
if let Err(e) = service
.attestation_service
.validator_subscriptions(subscriptions);
.validator_subscriptions(subscriptions) {
warn!(service.log, "Validator subscription failed"; "error" => e);
}
}
}
}
@ -293,14 +294,12 @@ fn spawn_service<T: BeaconChainTypes>(
match message {
// attestation information gets processed in the attestation service
PubsubMessage::Attestation(ref subnet_and_attestation) => {
let subnet = &subnet_and_attestation.0;
let subnet = subnet_and_attestation.0;
let attestation = &subnet_and_attestation.1;
// checks if we have an aggregator for the slot. If so, we process
// the attestation
if service.attestation_service.should_process_attestation(
&id,
&source,
*subnet,
subnet,
attestation,
) {
let _ = service

View File

@ -36,16 +36,17 @@
use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId};
use super::network_context::SyncNetworkContext;
use super::peer_sync_info::{PeerSyncInfo, PeerSyncType};
use super::range_sync::{BatchId, ChainId, RangeSync};
use super::range_sync::{BatchId, ChainId, RangeSync, EPOCHS_PER_BATCH};
use super::RequestId;
use crate::service::NetworkMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::rpc::BlocksByRootRequest;
use eth2_libp2p::rpc::{methods::MAX_REQUEST_BLOCKS, BlocksByRootRequest};
use eth2_libp2p::types::NetworkGlobals;
use eth2_libp2p::PeerId;
use fnv::FnvHashMap;
use slog::{crit, debug, error, info, trace, warn, Logger};
use smallvec::SmallVec;
use ssz_types::VariableList;
use std::boxed::Box;
use std::ops::Sub;
use std::sync::Arc;
@ -188,6 +189,10 @@ pub fn spawn<T: BeaconChainTypes>(
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
log: slog::Logger,
) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
assert!(
MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
"Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
);
// generate the message channel
let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
@ -269,7 +274,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// by one and their head_slot is within the slot tolerance, consider this peer
// fully synced.
if (self.chain.fork_choice.contains_block(&remote.head_root)) || // the first case
if (self.chain.fork_choice.read().contains_block(&remote.head_root)) || // the first case
(remote.finalized_epoch.sub(local_peer_info.finalized_epoch) == 1 && remote.head_slot.sub(local_peer_info.head_slot) < SLOT_IMPORT_TOLERANCE as u64)
// the second case
{
@ -497,7 +502,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
}
let request = BlocksByRootRequest {
block_roots: vec![block_hash],
block_roots: VariableList::from(vec![block_hash]),
};
if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) {
@ -715,7 +720,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
};
let request = BlocksByRootRequest {
block_roots: vec![parent_hash],
block_roots: VariableList::from(vec![parent_hash]),
};
// We continue to search for the chain of blocks from the same peer. Other peers are not
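
The assert added in spawn above encodes a simple arithmetic invariant: a full sync batch must fit inside one BlocksByRange request. A back-of-envelope check with illustrative values (MAX_REQUEST_BLOCKS is 1024 in the v0.12 networking spec; the other numbers are assumptions, not read from Lighthouse):

fn main() {
    let max_request_blocks: u64 = 1024;
    let slots_per_epoch: u64 = 32; // mainnet
    let epochs_per_batch: u64 = 2; // assumed batch length
    let batch_blocks = slots_per_epoch * epochs_per_batch;
    // 1024 >= 64, so one batch always fits in a single request.
    assert!(max_request_blocks >= batch_blocks);
    println!("a {}-block batch fits within MAX_REQUEST_BLOCKS", batch_blocks);
}
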

View File

@ -3,6 +3,7 @@ use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::PeerId;
use fnv::FnvHashMap;
use ssz::Encode;
use std::cmp::min;
use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
@ -75,7 +76,7 @@ impl<T: EthSpec> Batch<T> {
pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest {
BlocksByRangeRequest {
start_slot: self.start_slot.into(),
count: std::cmp::min(
count: min(
T::slots_per_epoch() * EPOCHS_PER_BATCH,
self.end_slot.sub(self.start_slot).into(),
),

View File

@ -455,6 +455,7 @@ impl<T: BeaconChainTypes> ChainCollection<T> {
if chain.target_head_slot <= local_finalized_slot
|| beacon_chain
.fork_choice
.read()
.contains_block(&chain.target_head_root)
{
debug!(log_ref, "Purging out of finalized chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot);
@ -468,6 +469,7 @@ impl<T: BeaconChainTypes> ChainCollection<T> {
if chain.target_head_slot <= local_finalized_slot
|| beacon_chain
.fork_choice
.read()
.contains_block(&chain.target_head_root)
{
debug!(log_ref, "Purging out of date head chain"; "start_epoch" => chain.start_epoch, "end_slot" => chain.target_head_slot);

View File

@ -9,5 +9,5 @@ mod sync_type;
pub use batch::Batch;
pub use batch::BatchId;
pub use chain::ChainId;
pub use chain::{ChainId, EPOCHS_PER_BATCH};
pub use range::RangeSync;

View File

@ -30,6 +30,7 @@ impl RangeSyncType {
if remote_info.finalized_epoch > local_info.finalized_epoch
&& !chain
.fork_choice
.read()
.contains_block(&remote_info.finalized_root)
{
RangeSyncType::Finalized

View File

@ -9,29 +9,27 @@ use attestation::AttMaxCover;
use attestation_id::AttestationId;
use max_cover::maximum_cover;
use parking_lot::RwLock;
use state_processing::per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, ExitValidationError,
ProposerSlashingValidationError,
};
use state_processing::per_block_processing::errors::AttestationValidationError;
use state_processing::per_block_processing::{
get_slashable_indices_modular, verify_attestation_for_block_inclusion,
verify_attester_slashing, verify_exit, verify_exit_time_independent_only,
verify_proposer_slashing, VerifySignatures,
get_slashable_indices, get_slashable_indices_modular, verify_attestation_for_block_inclusion,
verify_exit, VerifySignatures,
};
use state_processing::SigVerifiedOp;
use std::collections::{hash_map, HashMap, HashSet};
use std::marker::PhantomData;
use std::ptr;
use types::{
typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec,
EthSpec, Fork, Hash256, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Validator,
EthSpec, Fork, ForkVersion, Hash256, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit,
Validator,
};
#[derive(Default, Debug)]
pub struct OperationPool<T: EthSpec + Default> {
/// Map from attestation ID (see below) to vectors of attestations.
attestations: RwLock<HashMap<AttestationId, Vec<Attestation<T>>>>,
/// Map from two attestation IDs to a slashing for those IDs.
attester_slashings: RwLock<HashMap<(AttestationId, AttestationId), AttesterSlashing<T>>>,
/// Set of attester slashings, and the fork version they were verified against.
attester_slashings: RwLock<HashSet<(AttesterSlashing<T>, ForkVersion)>>,
/// Map from proposer index to slashing.
proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>,
/// Map from exiting validator to their exit data.
@ -175,54 +173,23 @@ impl<T: EthSpec> OperationPool<T> {
/// Insert a proposer slashing into the pool.
pub fn insert_proposer_slashing(
&self,
slashing: ProposerSlashing,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), ProposerSlashingValidationError> {
// TODO: should maybe insert anyway if the proposer is unknown in the validator index,
// because they could *become* known later
verify_proposer_slashing(&slashing, state, VerifySignatures::True, spec)?;
verified_proposer_slashing: SigVerifiedOp<ProposerSlashing>,
) {
let slashing = verified_proposer_slashing.into_inner();
self.proposer_slashings
.write()
.insert(slashing.signed_header_1.message.proposer_index, slashing);
Ok(())
}
/// Compute the tuple ID that is used to identify an attester slashing.
///
/// Depends on the fork field of the state, but not on the state's epoch.
fn attester_slashing_id(
slashing: &AttesterSlashing<T>,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> (AttestationId, AttestationId) {
(
AttestationId::from_data(
&slashing.attestation_1.data,
&state.fork,
state.genesis_validators_root,
spec,
),
AttestationId::from_data(
&slashing.attestation_2.data,
&state.fork,
state.genesis_validators_root,
spec,
),
)
}
/// Insert an attester slashing into the pool.
pub fn insert_attester_slashing(
&self,
slashing: AttesterSlashing<T>,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), AttesterSlashingValidationError> {
verify_attester_slashing(state, &slashing, VerifySignatures::True, spec)?;
let id = Self::attester_slashing_id(&slashing, state, spec);
self.attester_slashings.write().insert(id, slashing);
Ok(())
verified_slashing: SigVerifiedOp<AttesterSlashing<T>>,
fork: Fork,
) {
self.attester_slashings
.write()
.insert((verified_slashing.into_inner(), fork.current_version));
}
/// Get proposer and attester slashings for inclusion in a block.
@ -233,7 +200,6 @@ impl<T: EthSpec> OperationPool<T> {
pub fn get_slashings(
&self,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing<T>>) {
let proposer_slashings = filter_limit_operations(
self.proposer_slashings.read().values(),
@ -258,11 +224,11 @@ impl<T: EthSpec> OperationPool<T> {
.attester_slashings
.read()
.iter()
.filter(|(id, slashing)| {
// Check the fork.
Self::attester_slashing_id(slashing, state, spec) == **id
})
.filter(|(_, slashing)| {
.filter(|(slashing, fork)| {
if *fork != state.fork.previous_version && *fork != state.fork.current_version {
return false;
}
// Take all slashings that will slash 1 or more validators.
let slashed_validators =
get_slashable_indices_modular(state, slashing, |index, validator| {
@ -279,7 +245,7 @@ impl<T: EthSpec> OperationPool<T> {
}
})
.take(T::MaxAttesterSlashings::to_usize())
.map(|(_, slashing)| slashing.clone())
.map(|(slashing, _)| slashing.clone())
.collect();
(proposer_slashings, attester_slashings)
@ -298,17 +264,20 @@ impl<T: EthSpec> OperationPool<T> {
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
/// fork.
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
self.attester_slashings.write().retain(|id, slashing| {
let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id;
let curr_epoch = finalized_state.current_epoch();
let slashing_ok =
get_slashable_indices_modular(finalized_state, slashing, |_, validator| {
validator.slashed || validator.is_withdrawable_at(curr_epoch)
})
.is_ok();
fork_ok && slashing_ok
});
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState<T>, head_fork: Fork) {
self.attester_slashings
.write()
.retain(|(slashing, fork_version)| {
// Any slashings for forks older than the finalized state's previous fork can be
// discarded. We allow the head_fork's current version too in case a fork has
// occurred between the finalized state and the head.
let fork_ok = *fork_version == finalized_state.fork.previous_version
|| *fork_version == finalized_state.fork.current_version
|| *fork_version == head_fork.current_version;
// Slashings that don't slash any validators can also be dropped.
let slashing_ok = get_slashable_indices(finalized_state, slashing).is_ok();
fork_ok && slashing_ok
});
}
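
The retention rule above keeps a slashing only while the fork version it was verified against is still plausible. A small sketch of that predicate with plain [u8; 4] fork versions (illustrative only):

type ForkVersion = [u8; 4];

fn keep_slashing(
    verified_against: ForkVersion,
    finalized_previous: ForkVersion,
    finalized_current: ForkVersion,
    head_current: ForkVersion,
) -> bool {
    // Keep a slashing if it was verified against the finalized state's previous
    // or current fork, or the head's current fork (covering a fork that occurred
    // after finalization).
    verified_against == finalized_previous
        || verified_against == finalized_current
        || verified_against == head_current
}

fn main() {
    let genesis = [0, 0, 0, 0];
    let next = [1, 0, 0, 0];
    assert!(keep_slashing(genesis, genesis, genesis, next));
    assert!(!keep_slashing([9, 9, 9, 9], genesis, genesis, next));
}
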
/// Total number of attester slashings in the pool.
@ -321,18 +290,12 @@ impl<T: EthSpec> OperationPool<T> {
self.proposer_slashings.read().len()
}
/// Insert a voluntary exit, validating it almost-entirely (future exits are permitted).
pub fn insert_voluntary_exit(
&self,
exit: SignedVoluntaryExit,
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), ExitValidationError> {
verify_exit_time_independent_only(state, &exit, VerifySignatures::True, spec)?;
/// Insert a voluntary exit that has previously been checked elsewhere.
pub fn insert_voluntary_exit(&self, verified_exit: SigVerifiedOp<SignedVoluntaryExit>) {
let exit = verified_exit.into_inner();
self.voluntary_exits
.write()
.insert(exit.message.validator_index, exit);
Ok(())
}
/// Get a list of voluntary exits for inclusion in a block.
@ -357,11 +320,11 @@ impl<T: EthSpec> OperationPool<T> {
);
}
/// Prune all types of transactions given the latest finalized state.
pub fn prune_all(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
/// Prune all types of transactions given the latest finalized state and head fork.
pub fn prune_all(&self, finalized_state: &BeaconState<T>, head_fork: Fork) {
self.prune_attestations(finalized_state);
self.prune_proposer_slashings(finalized_state);
self.prune_attester_slashings(finalized_state, spec);
self.prune_attester_slashings(finalized_state, head_fork);
self.prune_voluntary_exits(finalized_state);
}
@ -424,7 +387,10 @@ impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
mod release_tests {
use super::attestation::earliest_attestation_validators;
use super::*;
use state_processing::common::{get_attesting_indices, get_base_reward};
use state_processing::{
common::{get_attesting_indices, get_base_reward},
VerifyOperation,
};
use std::collections::BTreeSet;
use std::iter::FromIterator;
use types::test_utils::*;
@ -895,43 +861,99 @@ mod release_tests {
}
}
struct TestContext {
spec: ChainSpec,
state: BeaconState<MainnetEthSpec>,
keypairs: Vec<Keypair>,
op_pool: OperationPool<MainnetEthSpec>,
}
impl TestContext {
fn new() -> Self {
let spec = MainnetEthSpec::default_spec();
let num_validators = 32;
let mut state_builder =
TestingBeaconStateBuilder::<MainnetEthSpec>::from_deterministic_keypairs(
num_validators,
&spec,
);
state_builder.build_caches(&spec).unwrap();
let (state, keypairs) = state_builder.build();
let op_pool = OperationPool::new();
TestContext {
spec,
state,
keypairs,
op_pool,
}
}
fn proposer_slashing(&self, proposer_index: u64) -> ProposerSlashing {
TestingProposerSlashingBuilder::double_vote::<MainnetEthSpec>(
ProposerSlashingTestTask::Valid,
proposer_index,
&self.keypairs[proposer_index as usize].sk,
&self.state.fork,
self.state.genesis_validators_root,
&self.spec,
)
}
fn attester_slashing(&self, slashed_indices: &[u64]) -> AttesterSlashing<MainnetEthSpec> {
let signer =
|idx: u64, message: &[u8]| Signature::new(message, &self.keypairs[idx as usize].sk);
TestingAttesterSlashingBuilder::double_vote(
AttesterSlashingTestTask::Valid,
slashed_indices,
signer,
&self.state.fork,
self.state.genesis_validators_root,
&self.spec,
)
}
}
/// Insert two slashings for the same proposer and ensure only one is returned.
#[test]
fn duplicate_proposer_slashing() {
let spec = MainnetEthSpec::default_spec();
let num_validators = 32;
let mut state_builder =
TestingBeaconStateBuilder::<MainnetEthSpec>::from_deterministic_keypairs(
num_validators,
&spec,
);
state_builder.build_caches(&spec).unwrap();
let (state, keypairs) = state_builder.build();
let op_pool = OperationPool::new();
let ctxt = TestContext::new();
let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec);
let proposer_index = 0;
let slashing1 = TestingProposerSlashingBuilder::double_vote::<MainnetEthSpec>(
ProposerSlashingTestTask::Valid,
proposer_index,
&keypairs[proposer_index as usize].sk,
&state.fork,
state.genesis_validators_root,
&spec,
);
let slashing1 = ctxt.proposer_slashing(proposer_index);
let slashing2 = ProposerSlashing {
signed_header_1: slashing1.signed_header_2.clone(),
signed_header_2: slashing1.signed_header_1.clone(),
};
// Both slashings should be accepted by the pool.
op_pool
.insert_proposer_slashing(slashing1.clone(), &state, &spec)
.unwrap();
op_pool
.insert_proposer_slashing(slashing2.clone(), &state, &spec)
.unwrap();
// Both slashings should be valid and accepted by the pool.
op_pool.insert_proposer_slashing(slashing1.clone().validate(state, spec).unwrap());
op_pool.insert_proposer_slashing(slashing2.clone().validate(state, spec).unwrap());
// Should only get the second slashing back.
assert_eq!(op_pool.get_slashings(&state, &spec).0, vec![slashing2]);
assert_eq!(op_pool.get_slashings(state).0, vec![slashing2]);
}
// Sanity check on the pruning of proposer slashings
#[test]
fn prune_proposer_slashing_noop() {
let ctxt = TestContext::new();
let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec);
let slashing = ctxt.proposer_slashing(0);
op_pool.insert_proposer_slashing(slashing.clone().validate(state, spec).unwrap());
op_pool.prune_proposer_slashings(state);
assert_eq!(op_pool.get_slashings(state).0, vec![slashing]);
}
// Sanity check on the pruning of attester slashings
#[test]
fn prune_attester_slashing_noop() {
let ctxt = TestContext::new();
let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec);
let slashing = ctxt.attester_slashing(&[1, 3, 5, 7, 9]);
op_pool
.insert_attester_slashing(slashing.clone().validate(state, spec).unwrap(), state.fork);
op_pool.prune_attester_slashings(state, state.fork);
assert_eq!(op_pool.get_slashings(state).1, vec![slashing]);
}
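
The tests above show the new flow: operations are validated once at the gossip or API boundary and the pool only ever receives the pre-verified wrapper. A generic sketch of that pattern with made-up types and signatures (not the state_processing API):

// Stand-in for state_processing's SigVerifiedOp; the only way to obtain one is
// through a successful validation, and the pool unwraps it on insert.
struct SigVerifiedOp<T>(T);

impl<T> SigVerifiedOp<T> {
    fn into_inner(self) -> T {
        self.0
    }
}

fn validate(op: u64, valid: bool) -> Result<SigVerifiedOp<u64>, String> {
    // Signature and validity checks happen exactly once, here at the boundary.
    if valid {
        Ok(SigVerifiedOp(op))
    } else {
        Err("invalid operation".into())
    }
}

fn main() {
    let verified = validate(7, true).unwrap();
    let stored = verified.into_inner();
    assert_eq!(stored, 7);
}
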
}

View File

@ -19,7 +19,7 @@ pub struct PersistedOperationPool<T: EthSpec> {
// be difficult to make that roundtrip due to eager aggregation.
attestations: Vec<(AttestationId, Vec<Attestation<T>>)>,
/// Attester slashings.
attester_slashings: Vec<AttesterSlashing<T>>,
attester_slashings: Vec<(AttesterSlashing<T>, ForkVersion)>,
/// Proposer slashings.
proposer_slashings: Vec<ProposerSlashing>,
/// Voluntary exits.
@ -40,7 +40,7 @@ impl<T: EthSpec> PersistedOperationPool<T> {
.attester_slashings
.read()
.iter()
.map(|(_, slashing)| slashing.clone())
.cloned()
.collect();
let proposer_slashings = operation_pool
@ -66,19 +66,9 @@ impl<T: EthSpec> PersistedOperationPool<T> {
}
/// Reconstruct an `OperationPool`.
pub fn into_operation_pool(self, state: &BeaconState<T>, spec: &ChainSpec) -> OperationPool<T> {
pub fn into_operation_pool(self) -> OperationPool<T> {
let attestations = RwLock::new(self.attestations.into_iter().collect());
let attester_slashings = RwLock::new(
self.attester_slashings
.into_iter()
.map(|slashing| {
(
OperationPool::attester_slashing_id(&slashing, state, spec),
slashing,
)
})
.collect(),
);
let attester_slashings = RwLock::new(self.attester_slashings.into_iter().collect());
let proposer_slashings = RwLock::new(
self.proposer_slashings
.into_iter()

View File

@ -12,7 +12,13 @@ pub fn get_fork_choice<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&*beacon_chain.fork_choice.core_proto_array())
ResponseBuilder::new(&req)?.body_no_ssz(
&*beacon_chain
.fork_choice
.read()
.proto_array()
.core_proto_array(),
)
}
/// Returns the `PersistedOperationPool` struct.

View File

@ -2,7 +2,9 @@ use crate::helpers::*;
use crate::response_builder::ResponseBuilder;
use crate::validator::get_state_for_epoch;
use crate::{ApiError, ApiResult, UrlQuery};
use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig};
use beacon_chain::{
observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig,
};
use bus::BusReader;
use futures::executor::block_on;
use hyper::body::Bytes;
@ -504,31 +506,23 @@ pub async fn proposer_slashing<T: BeaconChainTypes>(
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?;
serde_json::from_slice::<ProposerSlashing>(&chunks)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into ProposerSlashing: {:?}",
e
))
})
.map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e))
.and_then(move |proposer_slashing| {
let spec = &beacon_chain.spec;
let state = &beacon_chain.head().unwrap().beacon_state;
if beacon_chain.eth1_chain.is_some() {
beacon_chain
.op_pool
.insert_proposer_slashing(proposer_slashing, state, spec)
.map_err(|e| {
ApiError::BadRequest(format!(
"Error while inserting proposer slashing: {:?}",
e
))
})
let obs_outcome = beacon_chain
.verify_proposer_slashing_for_gossip(proposer_slashing)
.map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?;
if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome {
beacon_chain.import_proposer_slashing(verified_proposer_slashing);
Ok(())
} else {
Err("Proposer slashing for that validator index already known".into())
}
} else {
return Err(ApiError::BadRequest(
"Cannot insert proposer slashing on node without Eth1 connection.".to_string(),
));
Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string())
}
})
.map_err(ApiError::BadRequest)
.and_then(|_| response_builder?.body(&true))
}
@ -551,18 +545,24 @@ pub async fn attester_slashing<T: BeaconChainTypes>(
))
})
.and_then(move |attester_slashing| {
let spec = &beacon_chain.spec;
let state = &beacon_chain.head().unwrap().beacon_state;
if beacon_chain.eth1_chain.is_some() {
beacon_chain
.op_pool
.insert_attester_slashing(attester_slashing, state, spec)
.map_err(|e| {
ApiError::BadRequest(format!(
"Error while inserting attester slashing: {:?}",
e
))
.verify_attester_slashing_for_gossip(attester_slashing)
.map_err(|e| format!("Error while verifying attester slashing: {:?}", e))
.and_then(|outcome| {
if let ObservationOutcome::New(verified_attester_slashing) = outcome {
beacon_chain
.import_attester_slashing(verified_attester_slashing)
.map_err(|e| {
format!("Error while importing attester slashing: {:?}", e)
})
} else {
Err(format!(
"Attester slashing only covers already slashed indices"
))
}
})
.map_err(ApiError::BadRequest)
} else {
Err(ApiError::BadRequest(
"Cannot insert attester slashing on node without Eth1 connection.".to_string(),

View File

@ -2,8 +2,8 @@ use crate::helpers::{check_content_type_for_json, publish_beacon_block_to_networ
use crate::response_builder::ResponseBuilder;
use crate::{ApiError, ApiResult, NetworkChannel, UrlQuery};
use beacon_chain::{
attestation_verification::Error as AttnError, BeaconChain, BeaconChainTypes, BlockError,
StateSkipConfig,
attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
BlockError, ForkChoiceError, StateSkipConfig,
};
use bls::PublicKeyBytes;
use eth2_libp2p::PubsubMessage;
@ -16,7 +16,7 @@ use std::sync::Arc;
use types::beacon_state::EthSpec;
use types::{
Attestation, AttestationData, BeaconState, Epoch, RelativeEpoch, SelectionProof,
SignedAggregateAndProof, SignedBeaconBlock, Slot,
SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId,
};
/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This
@ -220,6 +220,16 @@ fn return_validator_duties<T: BeaconChainTypes>(
))
})?;
let committee_count_at_slot = duties
.map(|d| state.get_committee_count_at_slot(d.slot))
.transpose()
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to find committee count at slot: {:?}",
e
))
})?;
let aggregator_modulo = duties
.map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec))
.transpose()
@ -238,6 +248,7 @@ fn return_validator_duties<T: BeaconChainTypes>(
validator_index: Some(validator_index as u64),
attestation_slot: duties.map(|d| d.slot),
attestation_committee_index: duties.map(|d| d.index),
committee_count_at_slot,
attestation_committee_position: duties.map(|d| d.committee_position),
block_proposal_slots,
aggregator_modulo,
@ -249,6 +260,7 @@ fn return_validator_duties<T: BeaconChainTypes>(
attestation_slot: None,
attestation_committee_index: None,
attestation_committee_position: None,
committee_count_at_slot: None,
block_proposal_slots: vec![],
aggregator_modulo: None,
})
@ -443,21 +455,24 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
))
})
// Process all of the aggregates _without_ exiting early if one fails.
.map(move |attestations: Vec<Attestation<T::EthSpec>>| {
attestations
.into_par_iter()
.enumerate()
.map(|(i, attestation)| {
process_unaggregated_attestation(
&beacon_chain,
network_chan.clone(),
attestation,
i,
&log,
)
})
.collect::<Vec<Result<_, _>>>()
})
.map(
move |attestations: Vec<(Attestation<T::EthSpec>, SubnetId)>| {
attestations
.into_par_iter()
.enumerate()
.map(|(i, (attestation, subnet_id))| {
process_unaggregated_attestation(
&beacon_chain,
network_chan.clone(),
attestation,
subnet_id,
i,
&log,
)
})
.collect::<Vec<Result<_, _>>>()
},
)
// Iterate through all the results and return on the first `Err`.
//
// Note: this will only provide info about the _first_ failure, not all failures.
@ -472,6 +487,7 @@ fn process_unaggregated_attestation<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
network_chan: NetworkChannel<T::EthSpec>,
attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId,
i: usize,
log: &Logger,
) -> Result<(), ApiError> {
@ -479,7 +495,7 @@ fn process_unaggregated_attestation<T: BeaconChainTypes>(
// Verify that the attestation is valid to be included on the gossip network.
let verified_attestation = beacon_chain
.verify_unaggregated_attestation_for_gossip(attestation.clone())
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id)
.map_err(|e| {
handle_attestation_error(
e,
@ -492,9 +508,7 @@ fn process_unaggregated_attestation<T: BeaconChainTypes>(
// Publish the attestation to the network
if let Err(e) = network_chan.send(NetworkMessage::Publish {
messages: vec![PubsubMessage::Attestation(Box::new((
attestation
.subnet_id(&beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to get subnet id: {:?}", e)))?,
subnet_id,
attestation,
)))],
}) {
@ -507,7 +521,7 @@ fn process_unaggregated_attestation<T: BeaconChainTypes>(
beacon_chain
.apply_attestation_to_fork_choice(&verified_attestation)
.map_err(|e| {
handle_attestation_error(
handle_fork_choice_error(
e,
&format!(
"unaggregated attestation {} was unable to be added to fork choice",
@ -648,7 +662,7 @@ fn process_aggregated_attestation<T: BeaconChainTypes>(
beacon_chain
.apply_attestation_to_fork_choice(&verified_attestation)
.map_err(|e| {
handle_attestation_error(
handle_fork_choice_error(
e,
&format!(
"aggregated attestation {} was unable to be added to fork choice",
@ -720,3 +734,48 @@ fn handle_attestation_error(
}
}
}
/// Common handler for `ForkChoiceError` during attestation verification.
fn handle_fork_choice_error(
e: BeaconChainError,
detail: &str,
data: &AttestationData,
log: &Logger,
) -> ApiError {
match e {
BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => {
error!(
log,
"Local attestation invalid for fork choice";
"detail" => detail,
"reason" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ProcessingError(format!(
"Invalid local attestation. Error: {:?} Detail: {}",
e, detail
))
}
e => {
error!(
log,
"Internal error applying attn to fork choice";
"detail" => detail,
"error" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ServerError(format!(
"Internal error verifying local attestation. Error: {:?}. Detail: {}",
e, detail
))
}
}
}

View File

@ -14,6 +14,7 @@ use remote_beacon_node::{
use rest_types::ValidatorDutyBytes;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use types::{
test_utils::{
build_double_vote_attester_slashing, build_proposer_slashing,
@ -21,7 +22,7 @@ use types::{
},
BeaconBlock, BeaconState, ChainSpec, Domain, Epoch, EthSpec, MinimalEthSpec, PublicKey,
RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot,
Validator,
SubnetId, Validator,
};
use version;
@ -143,7 +144,16 @@ fn validator_produce_attestation() {
))
.expect("should fetch duties from http api");
let duties = &duties[0];
let committee_count = duties
.committee_count_at_slot
.expect("should have committee count");
let subnet_id = SubnetId::compute_subnet::<E>(
attestation.data.slot,
attestation.data.index,
committee_count,
spec,
)
.unwrap();
// Try publishing the attestation without a signature or a committee bit set; ensure it
// raises an error.
let publish_status = env
@ -152,7 +162,7 @@ fn validator_produce_attestation() {
remote_node
.http
.validator()
.publish_attestations(vec![attestation.clone()]),
.publish_attestations(vec![(attestation.clone(), subnet_id)]),
)
.expect("should publish unsigned attestation");
assert!(
@ -178,7 +188,7 @@ fn validator_produce_attestation() {
remote_node
.http
.validator()
.publish_attestations(vec![attestation.clone()]),
.publish_attestations(vec![(attestation.clone(), subnet_id)]),
)
.expect("should publish attestation with invalid signature");
assert!(
@ -216,7 +226,7 @@ fn validator_produce_attestation() {
remote_node
.http
.validator()
.publish_attestations(vec![attestation.clone()]),
.publish_attestations(vec![(attestation.clone(), subnet_id)]),
)
.expect("should publish attestation");
assert!(
@ -410,10 +420,16 @@ fn validator_block_post() {
let spec = &E::default_spec();
let two_slots_secs = (spec.milliseconds_per_slot / 1_000) * 2;
let mut config = testing_client_config();
config.genesis = ClientGenesis::Interop {
validator_count: 8,
genesis_time: 13_371_337,
genesis_time: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs()
- two_slots_secs,
};
let node = build_node(&mut env, config);
@ -937,6 +953,8 @@ fn get_fork_choice() {
.beacon_chain()
.expect("node should have beacon chain")
.fork_choice
.read()
.proto_array()
.core_proto_array(),
"result should be as expected"
);
@ -1001,7 +1019,7 @@ fn proposer_slashing() {
let spec = &chain.spec;
// Check that there are no proposer slashings before insertion
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(proposer_slashings.len(), 0);
let slot = state.slot;
@ -1032,7 +1050,7 @@ fn proposer_slashing() {
assert!(result);
// Length should be just one as we've inserted only one proposer slashing
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(proposer_slashings.len(), 1);
assert_eq!(proposer_slashing.clone(), proposer_slashings[0]);
@ -1055,7 +1073,7 @@ fn proposer_slashing() {
assert!(result.is_err());
// Length should still be one as we've inserted nothing since last time.
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(proposer_slashings.len(), 1);
assert_eq!(proposer_slashing, proposer_slashings[0]);
}
@ -1088,7 +1106,7 @@ fn attester_slashing() {
let fork = &state.fork;
// Checking there are no attester slashings before insertion
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(attester_slashings.len(), 0);
let attester_slashing = build_double_vote_attester_slashing(
@ -1112,7 +1130,7 @@ fn attester_slashing() {
assert!(result);
// Length should be just one as we've inserted only one attester slashing
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(attester_slashings.len(), 1);
assert_eq!(attester_slashing, attester_slashings[0]);
@ -1133,10 +1151,10 @@ fn attester_slashing() {
.beacon()
.attester_slashing(invalid_attester_slashing),
);
assert!(result.is_err());
result.unwrap_err();
// Length should still be one as we've failed to insert the attester slashing.
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state, spec);
let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state);
assert_eq!(attester_slashings.len(), 1);
assert_eq!(attester_slashing, attester_slashings[0]);
}

View File

@ -374,13 +374,14 @@ pub fn get_testnet_dir(cli_args: &ArgMatches) -> Option<PathBuf> {
pub fn get_eth2_testnet_config<E: EthSpec>(
testnet_dir: &Option<PathBuf>,
) -> Result<Eth2TestnetConfig<E>, String> {
Ok(if let Some(testnet_dir) = testnet_dir {
if let Some(testnet_dir) = testnet_dir {
Eth2TestnetConfig::load(testnet_dir.clone())
.map_err(|e| format!("Unable to open testnet dir at {:?}: {}", testnet_dir, e))?
.map_err(|e| format!("Unable to open testnet dir at {:?}: {}", testnet_dir, e))
} else {
Eth2TestnetConfig::hard_coded()
.map_err(|e| format!("{} Error : {}", BAD_TESTNET_DIR_MESSAGE, e))?
})
.map_err(|e| format!("Error parsing hardcoded testnet: {}", e))?
.ok_or_else(|| format!("{}", BAD_TESTNET_DIR_MESSAGE))
}
}
/// A bit of a hack to find an unused port.

View File

@ -34,6 +34,7 @@ pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE";
///
/// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores
/// intermittent "restore point" states pre-finalization.
#[derive(Debug)]
pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
/// The slot and state root at the point where the database is split between hot and cold.
///

View File

@ -11,7 +11,7 @@ use types::*;
///
/// Utilises lazy-loading from separate storage for its vector fields.
///
/// Spec v0.11.1
/// Spec v0.12.1
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
pub struct PartialBeaconState<T>
where

View File

@ -1,7 +1,7 @@
# Summary
* [Introduction](./intro.md)
* [Become a Validator](./become-a-validator.md)
* [Become an Altona Validator](./become-a-validator.md)
* [Using Docker](./become-a-validator-docker.md)
* [Building from Source](./become-a-validator-source.md)
* [Installation](./installation.md)

View File

@ -1,12 +1,11 @@
# Become an Ethereum 2.0 Testnet Validator on Witti
# Become an Ethereum 2.0 Testnet Validator on Altona
Running a Lighthouse validator on the [Witti](https://github.com/goerli/witti)
Running a Lighthouse validator on the [Altona](https://github.com/goerli/altona)
multi-client testnet is easy if you're familiar with the terminal.
Lighthouse runs on Linux, MacOS and Windows and has a Docker work-flow to make
things as simple as possible.
## 0. Acquire Goerli ETH
Before you install Lighthouse, you'll need [Metamask](https://metamask.io/) and 3.2 gETH
(Goerli ETH). We recommend the [mudit.blog

View File

@ -19,7 +19,7 @@ validators must listen to event logs from the deposit contract. Since the
latest blocks of the Eth1 chain are vulnerable to re-orgs due to minor network
partitions, beacon nodes follow the Eth1 chain at a distance of 1,024 blocks
(~4 hours) (see
[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#misc)).
[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#misc)).
This follow distance protects the beacon chain from on-boarding validators that
are likely to be removed due to an Eth1 re-org.
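As a rough sanity check on the "~4 hours" figure (a sketch assuming an average Eth1 block time of about 14 seconds, which is an estimate rather than a spec constant):

```rust
// Back-of-the-envelope estimate of the Eth1 follow distance in hours.
// The ~14 s average Eth1 block time is an assumption, not a spec constant.
fn main() {
    let eth1_follow_distance: u64 = 1_024; // blocks
    let approx_eth1_block_secs: u64 = 14; // assumed average Eth1 block time
    let delay_secs = eth1_follow_distance * approx_eth1_block_secs;
    println!("≈ {:.1} hours", delay_secs as f64 / 3600.0); // ≈ 4.0 hours
}
```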
@ -27,13 +27,13 @@ Now we know there's a 4 hours delay before the beacon nodes even _consider_ an
Eth1 block. Once they _are_ considering these blocks, there's a voting period
where beacon validators vote on which Eth1 block to include in the beacon chain. This
period is defined as 32 epochs (~3.4 hours, see
[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#time-parameters)).
[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#time-parameters)).
During this voting period, each beacon block producer includes an
[`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#eth1data)
[`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1data)
in their block which counts as a vote towards what that validator considers to
be the head of the Eth1 chain at the start of the voting period (with respect
to `ETH1_FOLLOW_DISTANCE`, of course). You can see the exact voting logic
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#eth1-data).
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#eth1-data).
These two delays combined represent the time between an Eth1 deposit being
included in an Eth1 data vote and that validator appearing in the beacon chain.
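Putting the two delays together (a sketch assuming the mainnet parameters of 32 slots per epoch and 12 seconds per slot, plus the ~14 s Eth1 block time assumed above):

```rust
// Rough arithmetic for the combined deposit-to-inclusion delay.
// Assumes mainnet timing parameters; the Eth1 block time is only an estimate.
fn main() {
    let follow_distance_secs = 1_024 * 14; // ETH1_FOLLOW_DISTANCE at ~14 s per block
    let voting_period_secs = 32 * 32 * 12; // 32 epochs * 32 slots * 12 s ≈ 3.4 hours
    let total_secs = follow_distance_secs + voting_period_secs;
    println!("≈ {:.1} hours", total_secs as f64 / 3600.0); // ≈ 7.4 hours
}
```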
@ -57,17 +57,17 @@ They will simply be forgotten by the beacon chain! But, if those parameters were
correct, once the Eth1 delays have elapsed and the validator appears in the
beacon chain, there's _another_ delay before the validator becomes "active"
(canonical definition
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations.
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations.
Firstly, the validator won't become active until their beacon chain balance is
equal to or greater than
[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#gwei-values)
[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#gwei-values)
(32 ETH on mainnet, usually 3.2 ETH on testnets). Once this balance is reached,
the validator must wait until the start of the next epoch (up to 6.4 minutes)
for the
[`process_registry_updates`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#registry-updates)
[`process_registry_updates`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates)
routine to run. This routine activates validators with respect to a [churn
limit](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_validator_churn_limit);
limit](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit);
it will only allow the number of validators to increase (churn) by a certain
amount. Up until there are about 330,000 validators this churn limit is set to
4 and it starts to very slowly increase as the number of validators increases
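For reference, the churn limit described above comes from the spec's `get_validator_churn_limit` function. Below is a minimal sketch using the mainnet constants (`MIN_PER_EPOCH_CHURN_LIMIT = 4`, `CHURN_LIMIT_QUOTIENT = 65,536`), showing why the limit only rises above 4 once there are roughly 330,000 active validators:

```rust
// Sketch of the spec's validator churn limit (mainnet constants).
const MIN_PER_EPOCH_CHURN_LIMIT: u64 = 4;
const CHURN_LIMIT_QUOTIENT: u64 = 65_536;

fn churn_limit(active_validator_count: u64) -> u64 {
    std::cmp::max(
        MIN_PER_EPOCH_CHURN_LIMIT,
        active_validator_count / CHURN_LIMIT_QUOTIENT,
    )
}

fn main() {
    assert_eq!(churn_limit(100_000), 4);
    assert_eq!(churn_limit(327_679), 4);
    assert_eq!(churn_limit(327_680), 5); // ~330k validators: the first increase
}
```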

View File

@ -259,7 +259,7 @@ Typical Responses | 200
### Returns
Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#fork) of the current head.
Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork) of the current head.
### Example Response
@ -478,7 +478,7 @@ canonical chain.
### Returns
Returns an object containing a single
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate)
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate)
and its tree hash root.
### Example Response
@ -540,7 +540,7 @@ Typical Responses | 200
### Returns
Returns an object containing the genesis
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate).
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate).
### Example Response

View File

@ -44,14 +44,14 @@ Typical Responses | 200
"effective_balance_increment": 1000000000,
"genesis_fork_version": "0x00000000",
"bls_withdrawal_prefix_byte": "0x00",
"min_genesis_delay": 86400,
"genesis_delay": 172800,
"milliseconds_per_slot": 12000,
"min_attestation_inclusion_delay": 1,
"min_seed_lookahead": 1,
"max_seed_lookahead": 4,
"min_epochs_to_inactivity_penalty": 4,
"min_validator_withdrawability_delay": 256,
"persistent_committee_period": 2048,
"shard_committee_period": 2048,
"base_reward_factor": 64,
"whistleblower_reward_quotient": 512,
"proposer_reward_quotient": 8,
@ -106,14 +106,14 @@ Typical Responses | 200
"effective_balance_increment": 1000000000,
"genesis_fork_version": "0x00000000",
"bls_withdrawal_prefix_byte": "0x00",
"min_genesis_delay": 86400,
"genesis_delay": 172800,
"milliseconds_per_slot": 12000,
"min_attestation_inclusion_delay": 1,
"min_seed_lookahead": 1,
"max_seed_lookahead": 4,
"min_epochs_to_inactivity_penalty": 4,
"min_validator_withdrawability_delay": 256,
"persistent_committee_period": 2048,
"shard_committee_period": 2048,
"base_reward_factor": 64,
"whistleblower_reward_quotient": 512,
"proposer_reward_quotient": 8,

View File

@ -20,10 +20,11 @@ pub const BAD_TESTNET_DIR_MESSAGE: &str = "The hard-coded testnet directory was
pub fn parse_testnet_dir_with_hardcoded_default<E: EthSpec>(
matches: &ArgMatches,
name: &'static str,
) -> Result<Eth2TestnetConfig<E>, String> {
) -> Result<Option<Eth2TestnetConfig<E>>, String> {
if let Some(path) = parse_optional::<PathBuf>(matches, name)? {
Eth2TestnetConfig::load(path.clone())
.map_err(|e| format!("Unable to open testnet dir at {:?}: {}", path, e))
.map(Some)
} else {
Eth2TestnetConfig::hard_coded()
.map_err(|e| format!("{} Error : {}", BAD_TESTNET_DIR_MESSAGE, e))

View File

@ -9,9 +9,9 @@ use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
const TAG: &str = "v0.11.1";
const TAG: &str = "v0.12.1";
// NOTE: the version of the unsafe contract lags the main tag, but the v0.9.2.1 code is compatible
// with the unmodified v0.11.1 contract
// with the unmodified v0.12.1 contract
const UNSAFE_TAG: &str = "v0.9.2.1";
fn spec_url() -> String {

View File

@ -23,15 +23,15 @@ impl From<ethabi::Error> for DecodeError {
pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000;
pub const DEPOSIT_GAS: usize = 400_000;
pub const ABI: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.json");
pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.bytecode");
pub const ABI: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.json");
pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.bytecode");
pub const DEPOSIT_DATA_LEN: usize = 420; // lol
pub mod testnet {
pub const ABI: &[u8] =
include_bytes!("../contracts/v0.11.1_testnet_validator_registration.json");
include_bytes!("../contracts/v0.12.1_testnet_validator_registration.json");
pub const BYTECODE: &[u8] =
include_bytes!("../contracts/v0.11.1_testnet_validator_registration.bytecode");
include_bytes!("../contracts/v0.12.1_testnet_validator_registration.bytecode");
}
pub fn encode_eth1_tx_data(deposit_data: &DepositData) -> Result<Vec<u8>, Error> {

View File

@ -11,7 +11,7 @@ lazy_static = "1.4.0"
num-bigint = "0.2.6"
eth2_hashing = "0.1.0"
hex = "0.4.2"
milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" }
milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.1.0" }
serde_yaml = "0.8.11"
serde = "1.0.110"
serde_derive = "1.0.110"

View File

@ -53,6 +53,6 @@ fn reference_public_keys() {
"Reference should be 48 bytes (public key size)"
);
assert_eq!(pair.pk.as_bytes(), reference);
assert_eq!(pair.pk.as_bytes().to_vec(), reference);
});
}

View File

@ -1,3 +1,4 @@
testnet*
schlesi-*
witti-*
altona*

View File

@ -6,7 +6,7 @@ use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
const TESTNET_ID: &str = "witti-v0-11-3";
const TESTNET_ID: &str = "altona-v2";
fn main() {
if !base_dir().exists() {
@ -39,7 +39,7 @@ pub fn get_all_files() -> Result<(), String> {
pub fn get_file(filename: &str) -> Result<(), String> {
let url = format!(
"https://raw.githubusercontent.com/sigp/witti/6d079b0f10f6bed75cd003e5f0ea5ecbe2044455/lighthouse/{}",
"https://raw.githubusercontent.com/sigp/witti/2bab01c2c18aea9f571e79d646acfd34704cbfde/altona/lighthouse/{}",
filename
);

View File

@ -20,14 +20,16 @@ pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
pub const HARDCODED_TESTNET: &str = "witti-v0-11-3";
/// The name of the testnet to hardcode.
///
/// Should be set to `None` when no existing testnet is compatible with the codebase.
pub const HARDCODED_TESTNET: Option<&str> = Some("altona-v2");
pub const HARDCODED_YAML_CONFIG: &[u8] = include_bytes!("../witti-v0-11-3/config.yaml");
pub const HARDCODED_DEPLOY_BLOCK: &[u8] = include_bytes!("../witti-v0-11-3/deploy_block.txt");
pub const HARDCODED_DEPOSIT_CONTRACT: &[u8] =
include_bytes!("../witti-v0-11-3/deposit_contract.txt");
pub const HARDCODED_GENESIS_STATE: &[u8] = include_bytes!("../witti-v0-11-3/genesis.ssz");
pub const HARDCODED_BOOT_ENR: &[u8] = include_bytes!("../witti-v0-11-3/boot_enr.yaml");
pub const HARDCODED_YAML_CONFIG: &[u8] = include_bytes!("../altona-v2/config.yaml");
pub const HARDCODED_DEPLOY_BLOCK: &[u8] = include_bytes!("../altona-v2/deploy_block.txt");
pub const HARDCODED_DEPOSIT_CONTRACT: &[u8] = include_bytes!("../altona-v2/deposit_contract.txt");
pub const HARDCODED_GENESIS_STATE: &[u8] = include_bytes!("../altona-v2/genesis.ssz");
pub const HARDCODED_BOOT_ENR: &[u8] = include_bytes!("../altona-v2/boot_enr.yaml");
/// Specifies an Eth2 testnet.
///
@ -42,29 +44,34 @@ pub struct Eth2TestnetConfig<E: EthSpec> {
}
impl<E: EthSpec> Eth2TestnetConfig<E> {
// Creates the `Eth2TestnetConfig` that was included in the binary at compile time. This can be
// considered the default Lighthouse testnet.
//
// Returns an error if those included bytes are invalid (this is unlikely).
pub fn hard_coded() -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(HARDCODED_DEPOSIT_CONTRACT)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(HARDCODED_DEPLOY_BLOCK)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(HARDCODED_BOOT_ENR)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state: Some(
BeaconState::from_ssz_bytes(HARDCODED_GENESIS_STATE)
.map_err(|e| format!("Unable to parse genesis state: {:?}", e))?,
),
yaml_config: Some(
serde_yaml::from_reader(HARDCODED_YAML_CONFIG)
.map_err(|e| format!("Unable to parse genesis state: {:?}", e))?,
),
})
/// Creates the `Eth2TestnetConfig` that was included in the binary at compile time. This can be
/// considered the default Lighthouse testnet.
///
/// Returns an error if those included bytes are invalid (this is unlikely).
/// Returns `None` if the hardcoded testnet is disabled.
pub fn hard_coded() -> Result<Option<Self>, String> {
if HARDCODED_TESTNET.is_some() {
Ok(Some(Self {
deposit_contract_address: serde_yaml::from_reader(HARDCODED_DEPOSIT_CONTRACT)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(HARDCODED_DEPLOY_BLOCK)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(HARDCODED_BOOT_ENR)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state: Some(
BeaconState::from_ssz_bytes(HARDCODED_GENESIS_STATE)
.map_err(|e| format!("Unable to parse genesis state: {:?}", e))?,
),
yaml_config: Some(
serde_yaml::from_reader(HARDCODED_YAML_CONFIG)
.map_err(|e| format!("Unable to parse genesis state: {:?}", e))?,
),
}))
} else {
Ok(None)
}
}
// Write the files to the directory.
@ -207,17 +214,16 @@ mod tests {
type E = MainnetEthSpec;
/* TODO: disabled until testnet config is updated for v0.11
#[test]
fn hard_coded_works() {
let dir: Eth2TestnetConfig<E> =
Eth2TestnetConfig::hard_coded().expect("should decode hard_coded params");
assert!(dir.boot_enr.is_some());
assert!(dir.genesis_state.is_some());
assert!(dir.yaml_config.is_some());
if let Some(dir) =
Eth2TestnetConfig::<E>::hard_coded().expect("should decode hard_coded params")
{
assert!(dir.boot_enr.is_some());
assert!(dir.genesis_state.is_some());
assert!(dir.yaml_config.is_some());
}
}
*/
#[test]
fn round_trip() {

View File

@ -17,5 +17,5 @@ hex = "0.4.2"
eth2_ssz = "0.1.2"
serde_json = "1.0.52"
eth2_config = { path = "../eth2_config" }
proto_array_fork_choice = { path = "../../consensus/proto_array_fork_choice" }
proto_array = { path = "../../consensus/proto_array" }
operation_pool = { path = "../../beacon_node/operation_pool" }

View File

@ -12,12 +12,12 @@ use std::time::Duration;
use types::{
Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex,
Epoch, EthSpec, Fork, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, Signature,
SignedAggregateAndProof, SignedBeaconBlock, Slot,
SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId,
};
use url::Url;
pub use operation_pool::PersistedOperationPool;
pub use proto_array_fork_choice::core::ProtoArray;
pub use proto_array::core::ProtoArray;
pub use rest_types::{
CanonicalHeadResponse, Committee, HeadBeaconBlock, Health, IndividualVotesRequest,
IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes,
@ -227,7 +227,7 @@ impl<E: EthSpec> Validator<E> {
/// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network.
pub async fn publish_attestations(
&self,
attestation: Vec<Attestation<E>>,
attestation: Vec<(Attestation<E>, SubnetId)>,
) -> Result<PublishStatus, Error> {
let client = self.0.clone();
let url = self.url("attestations")?;

View File

@ -22,6 +22,8 @@ pub struct ValidatorDutyBase<T> {
pub attestation_committee_index: Option<CommitteeIndex>,
/// The position of the validator in the committee.
pub attestation_committee_position: Option<usize>,
/// The committee count at `attestation_slot`.
pub committee_count_at_slot: Option<u64>,
/// The slots in which a validator must propose a block (can be empty).
pub block_proposal_slots: Vec<Slot>,
/// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)`
@ -66,6 +68,8 @@ pub struct ValidatorSubscription {
pub attestation_committee_index: CommitteeIndex,
/// The slot in which to subscribe.
pub slot: Slot,
/// Committee count at slot to subscribe.
pub committee_count_at_slot: u64,
/// If true, the validator is an aggregator and the beacon node should aggregate attestations
/// for this slot.
pub is_aggregator: bool,

View File

@ -24,6 +24,11 @@ pub trait SlotClock: Send + Sync + Sized {
/// Returns the slot at this present time.
fn now(&self) -> Option<Slot>;
/// Indicates if the current time is prior to genesis time.
///
/// Returns `None` if the system clock cannot be read.
fn is_prior_to_genesis(&self) -> Option<bool>;
/// Returns the present time as a duration since the UNIX epoch.
///
/// Returns `None` if the present time is before the UNIX epoch (unlikely).

View File

@ -41,6 +41,10 @@ impl ManualSlotClock {
self.set_slot(self.now().unwrap().as_u64() + 1)
}
pub fn genesis_duration(&self) -> &Duration {
&self.genesis_duration
}
/// Returns the duration between UNIX epoch and the start of `slot`.
pub fn start_of(&self, slot: Slot) -> Option<Duration> {
let slot = slot
@ -104,6 +108,10 @@ impl SlotClock for ManualSlotClock {
self.slot_of(*self.current_time.read())
}
fn is_prior_to_genesis(&self) -> Option<bool> {
Some(*self.current_time.read() < self.genesis_duration)
}
fn now_duration(&self) -> Option<Duration> {
Some(*self.current_time.read())
}
@ -160,6 +168,26 @@ mod tests {
assert_eq!(clock.now(), Some(Slot::new(123)));
}
#[test]
fn test_is_prior_to_genesis() {
let genesis_secs = 1;
let clock = ManualSlotClock::new(
Slot::new(0),
Duration::from_secs(genesis_secs),
Duration::from_secs(1),
);
*clock.current_time.write() = Duration::from_secs(genesis_secs - 1);
assert!(clock.is_prior_to_genesis().unwrap(), "prior to genesis");
*clock.current_time.write() = Duration::from_secs(genesis_secs);
assert!(!clock.is_prior_to_genesis().unwrap(), "at genesis");
*clock.current_time.write() = Duration::from_secs(genesis_secs + 1);
assert!(!clock.is_prior_to_genesis().unwrap(), "after genesis");
}
#[test]
fn start_of() {
// Genesis slot and genesis duration 0.

View File

@ -22,6 +22,11 @@ impl SlotClock for SystemTimeSlotClock {
self.clock.slot_of(now)
}
fn is_prior_to_genesis(&self) -> Option<bool> {
let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?;
Some(now < *self.clock.genesis_duration())
}
fn now_duration(&self) -> Option<Duration> {
SystemTime::now().duration_since(UNIX_EPOCH).ok()
}

View File

@ -0,0 +1,20 @@
[package]
name = "fork_choice"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
types = { path = "../types" }
proto_array = { path = "../proto_array" }
eth2_ssz = { path = "../ssz" }
eth2_ssz_derive = { path = "../ssz_derive" }
[dev-dependencies]
state_processing = { path = "../../consensus/state_processing" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
store = { path = "../../beacon_node/store" }
tree_hash = { path = "../../consensus/tree_hash" }
slot_clock = { path = "../../common/slot_clock" }

View File

@ -0,0 +1,885 @@
use crate::ForkChoiceStore;
use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use std::marker::PhantomData;
use types::{
BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, IndexedAttestation, Slot,
};
/// Defined here:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#configuration
pub const SAFE_SLOTS_TO_UPDATE_JUSTIFIED: u64 = 8;
#[derive(Debug)]
pub enum Error<T> {
InvalidAttestation(InvalidAttestation),
InvalidBlock(InvalidBlock),
ProtoArrayError(String),
InvalidProtoArrayBytes(String),
MissingProtoArrayBlock(Hash256),
UnknownAncestor {
ancestor_slot: Slot,
descendant_root: Hash256,
},
InconsistentOnTick {
previous_slot: Slot,
time: Slot,
},
BeaconStateError(BeaconStateError),
AttemptToRevertJustification {
store: Slot,
state: Slot,
},
ForkChoiceStoreError(T),
UnableToSetJustifiedCheckpoint(T),
AfterBlockFailed(T),
}
impl<T> From<InvalidAttestation> for Error<T> {
fn from(e: InvalidAttestation) -> Self {
Error::InvalidAttestation(e)
}
}
#[derive(Debug)]
pub enum InvalidBlock {
UnknownParent(Hash256),
FutureSlot {
current_slot: Slot,
block_slot: Slot,
},
FinalizedSlot {
finalized_slot: Slot,
block_slot: Slot,
},
NotFinalizedDescendant {
finalized_root: Hash256,
block_ancestor: Option<Hash256>,
},
}
#[derive(Debug)]
pub enum InvalidAttestation {
/// The attestation's aggregation bits were empty when they shouldn't be.
EmptyAggregationBitfield,
/// The `attestation.data.beacon_block_root` block is unknown.
UnknownHeadBlock { beacon_block_root: Hash256 },
/// The `attestation.data.slot` is not from the same epoch as `data.target.epoch` and therefore
/// the attestation is invalid.
BadTargetEpoch { target: Epoch, slot: Slot },
/// The target root of the attestation points to a block that we have not verified.
UnknownTargetRoot(Hash256),
/// The attestation is for an epoch in the future (with respect to the gossip clock disparity).
FutureEpoch {
attestation_epoch: Epoch,
current_epoch: Epoch,
},
/// The attestation is for an epoch in the past (with respect to the gossip clock disparity).
PastEpoch {
attestation_epoch: Epoch,
current_epoch: Epoch,
},
/// The attestation references a target root that does not match what is stored in our
/// database.
InvalidTarget {
attestation: Hash256,
local: Hash256,
},
/// The attestation is attesting to a state that is later than itself. (Viz., attesting to the
/// future).
AttestsToFutureBlock { block: Slot, attestation: Slot },
}
impl<T> From<String> for Error<T> {
fn from(e: String) -> Self {
Error::ProtoArrayError(e)
}
}
/// Calculate how far `slot` lies from the start of its epoch.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
pub fn compute_slots_since_epoch_start<E: EthSpec>(slot: Slot) -> Slot {
slot - slot
.epoch(E::slots_per_epoch())
.start_slot(E::slots_per_epoch())
}
/// Calculate the first slot in `epoch`.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
fn compute_start_slot_at_epoch<E: EthSpec>(epoch: Epoch) -> Slot {
epoch.start_slot(E::slots_per_epoch())
}
/// Called whenever the current time increases.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick
fn on_tick<T, E>(store: &mut T, time: Slot) -> Result<(), Error<T::Error>>
where
T: ForkChoiceStore<E>,
E: EthSpec,
{
let previous_slot = store.get_current_slot();
if time > previous_slot + 1 {
return Err(Error::InconsistentOnTick {
previous_slot,
time,
});
}
// Update store time.
store.set_current_slot(time);
let current_slot = store.get_current_slot();
if !(current_slot > previous_slot && compute_slots_since_epoch_start::<E>(current_slot) == 0) {
return Ok(());
}
if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch {
store
.set_justified_checkpoint(*store.best_justified_checkpoint())
.map_err(Error::ForkChoiceStoreError)?;
}
Ok(())
}
/// Used for queuing attestations from the current slot. Only contains the minimum necessary
/// information about the attestation.
#[derive(Clone, PartialEq, Encode, Decode)]
pub struct QueuedAttestation {
slot: Slot,
attesting_indices: Vec<u64>,
block_root: Hash256,
target_epoch: Epoch,
}
impl<E: EthSpec> From<&IndexedAttestation<E>> for QueuedAttestation {
fn from(a: &IndexedAttestation<E>) -> Self {
Self {
slot: a.data.slot,
attesting_indices: a.attesting_indices[..].to_vec(),
block_root: a.data.beacon_block_root,
target_epoch: a.data.target.epoch,
}
}
}
/// Returns all values in `queued_attestations` that have a slot that is earlier than the
/// current slot. Also removes those values from `queued_attestations`.
fn dequeue_attestations(
current_slot: Slot,
queued_attestations: &mut Vec<QueuedAttestation>,
) -> Vec<QueuedAttestation> {
let remaining = queued_attestations.split_off(
queued_attestations
.iter()
.position(|a| a.slot >= current_slot)
.unwrap_or_else(|| queued_attestations.len()),
);
std::mem::replace(queued_attestations, remaining)
}
/// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice":
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice
///
/// ## Detail
///
/// This struct wraps `ProtoArrayForkChoice` and provides:
///
/// - Management of the justified state and caching of balances.
/// - Queuing of attestations from the current slot.
pub struct ForkChoice<T, E> {
/// Storage for `ForkChoice`, modelled off the spec `Store` object.
fc_store: T,
/// The underlying representation of the block DAG.
proto_array: ProtoArrayForkChoice,
/// Attestations that arrived at the current slot and must be queued for later processing.
queued_attestations: Vec<QueuedAttestation>,
_phantom: PhantomData<E>,
}
impl<T, E> PartialEq for ForkChoice<T, E>
where
T: ForkChoiceStore<E> + PartialEq,
E: EthSpec,
{
fn eq(&self, other: &Self) -> bool {
self.fc_store == other.fc_store
&& self.proto_array == other.proto_array
&& self.queued_attestations == other.queued_attestations
}
}
impl<T, E> ForkChoice<T, E>
where
T: ForkChoiceStore<E>,
E: EthSpec,
{
/// Instantiates `Self` from the genesis parameters.
pub fn from_genesis(
fc_store: T,
genesis_block: &BeaconBlock<E>,
) -> Result<Self, Error<T::Error>> {
let finalized_block_slot = genesis_block.slot;
let finalized_block_state_root = genesis_block.state_root;
let proto_array = ProtoArrayForkChoice::new(
finalized_block_slot,
finalized_block_state_root,
fc_store.justified_checkpoint().epoch,
fc_store.finalized_checkpoint().epoch,
fc_store.finalized_checkpoint().root,
)?;
Ok(Self {
fc_store,
proto_array,
queued_attestations: vec![],
_phantom: PhantomData,
})
}
/// Instantiates `Self` from some existing components.
///
/// This is useful if the existing components have been loaded from disk after a process
/// restart.
pub fn from_components(
fc_store: T,
proto_array: ProtoArrayForkChoice,
queued_attestations: Vec<QueuedAttestation>,
) -> Self {
Self {
fc_store,
proto_array,
queued_attestations,
_phantom: PhantomData,
}
}
/// Returns the block root of an ancestor of `block_root` at the given `slot`. (Note: `slot` refers
/// to the block that is *returned*, not the one that is supplied.)
///
/// The result may be `Ok(None)` if the block does not descend from the finalized block. This
/// is an artifact of proto-array, which sometimes contains descendants of blocks that have
/// been pruned.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_ancestor
#[allow(clippy::if_same_then_else)]
fn get_ancestor(
&self,
block_root: Hash256,
ancestor_slot: Slot,
) -> Result<Option<Hash256>, Error<T::Error>>
where
T: ForkChoiceStore<E>,
E: EthSpec,
{
let block = self
.proto_array
.get_block(&block_root)
.ok_or_else(|| Error::MissingProtoArrayBlock(block_root))?;
if block.slot > ancestor_slot {
Ok(self
.proto_array
.core_proto_array()
.iter_block_roots(&block_root)
// Search for a slot that is **less than or equal to** the target slot. We check
// for lower slots to account for skip slots.
.find(|(_, slot)| *slot <= ancestor_slot)
.map(|(root, _)| root))
} else if block.slot == ancestor_slot {
Ok(Some(block_root))
} else {
// Root is older than queried slot, thus a skip slot. Return most recent root prior to
// slot.
Ok(Some(block_root))
}
}
/// Run the fork choice rule to determine the head.
///
/// ## Specification
///
/// Is equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head
pub fn get_head(&mut self, current_slot: Slot) -> Result<Hash256, Error<T::Error>> {
self.update_time(current_slot)?;
let store = &mut self.fc_store;
let result = self
.proto_array
.find_head(
store.justified_checkpoint().epoch,
store.justified_checkpoint().root,
store.finalized_checkpoint().epoch,
store.justified_balances(),
)
.map_err(Into::into);
result
}
/// Returns `true` if the given `store` should be updated to set
/// `state.current_justified_checkpoint` as its `justified_checkpoint`.
///
/// ## Specification
///
/// Is equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint
fn should_update_justified_checkpoint(
&mut self,
current_slot: Slot,
state: &BeaconState<E>,
) -> Result<bool, Error<T::Error>> {
self.update_time(current_slot)?;
let new_justified_checkpoint = &state.current_justified_checkpoint;
if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot())
< SAFE_SLOTS_TO_UPDATE_JUSTIFIED
{
return Ok(true);
}
let justified_slot =
compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch);
// This sanity check is not in the spec, but the invariant is implied.
if justified_slot >= state.slot {
return Err(Error::AttemptToRevertJustification {
store: justified_slot,
state: state.slot,
});
}
// We know that the slot for `new_justified_checkpoint.root` is not greater than
// `state.slot`, since a state cannot justify its own slot.
//
// We know that `new_justified_checkpoint.root` is an ancestor of `state`, since a `state`
// only ever justifies ancestors.
//
// A prior `if` statement protects against a justified_slot that is greater than
// `state.slot`
let justified_ancestor =
self.get_ancestor(new_justified_checkpoint.root, justified_slot)?;
if justified_ancestor != Some(self.fc_store.justified_checkpoint().root) {
return Ok(false);
}
Ok(true)
}
/// Add `block` to the fork choice DAG.
///
/// - `block_root` is the root of `block`.
/// - The root of `state` matches `block.state_root`.
///
/// ## Specification
///
/// Approximates:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_block
///
/// It only approximates the specification since it does not run the `state_transition` check.
/// That should have already been called upstream and it's too expensive to call again.
///
/// ## Notes:
///
/// The supplied block **must** pass the `state_transition` function as it will not be run
/// here.
pub fn on_block(
&mut self,
current_slot: Slot,
block: &BeaconBlock<E>,
block_root: Hash256,
state: &BeaconState<E>,
) -> Result<(), Error<T::Error>> {
let current_slot = self.update_time(current_slot)?;
// Parent block must be known.
if !self.proto_array.contains_block(&block.parent_root) {
return Err(Error::InvalidBlock(InvalidBlock::UnknownParent(
block.parent_root,
)));
}
// Blocks cannot be in the future. If they are, their consideration must be delayed until
// they are in the past.
//
// Note: presently, we do not delay consideration. We just drop the block.
if block.slot > current_slot {
return Err(Error::InvalidBlock(InvalidBlock::FutureSlot {
current_slot,
block_slot: block.slot,
}));
}
// Check that block is later than the finalized epoch slot (optimization to reduce calls to
// get_ancestor).
let finalized_slot =
compute_start_slot_at_epoch::<E>(self.fc_store.finalized_checkpoint().epoch);
if block.slot <= finalized_slot {
return Err(Error::InvalidBlock(InvalidBlock::FinalizedSlot {
finalized_slot,
block_slot: block.slot,
}));
}
// Check block is a descendant of the finalized block at the checkpoint finalized slot.
//
// Note: the specification uses `hash_tree_root(block)` instead of `block.parent_root` for
// the start of this search. I claim that since `block.slot > finalized_slot` it is
// equivalent to use the parent root for this search. Doing so reduces a single lookup
// (trivial), but more importantly, it means we don't need to have added `block` to
// `self.proto_array` to do this search. See:
//
// https://github.com/ethereum/eth2.0-specs/pull/1884
let block_ancestor = self.get_ancestor(block.parent_root, finalized_slot)?;
let finalized_root = self.fc_store.finalized_checkpoint().root;
if block_ancestor != Some(finalized_root) {
return Err(Error::InvalidBlock(InvalidBlock::NotFinalizedDescendant {
finalized_root,
block_ancestor,
}));
}
// Update justified checkpoint.
if state.current_justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch {
if state.current_justified_checkpoint.epoch
> self.fc_store.best_justified_checkpoint().epoch
{
self.fc_store
.set_best_justified_checkpoint(state.current_justified_checkpoint);
}
if self.should_update_justified_checkpoint(current_slot, state)? {
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
// Update finalized checkpoint.
if state.finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch {
self.fc_store
.set_finalized_checkpoint(state.finalized_checkpoint);
let finalized_slot =
compute_start_slot_at_epoch::<E>(self.fc_store.finalized_checkpoint().epoch);
// Note: the `if` statement here is not part of the specification, but I claim that it
// is an optimization and equivalent to the specification. See this PR for more
// information:
//
// https://github.com/ethereum/eth2.0-specs/pull/1880
if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint {
if state.current_justified_checkpoint.epoch
> self.fc_store.justified_checkpoint().epoch
|| self
.get_ancestor(self.fc_store.justified_checkpoint().root, finalized_slot)?
!= Some(self.fc_store.finalized_checkpoint().root)
{
self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
}
let target_slot = block
.slot
.epoch(E::slots_per_epoch())
.start_slot(E::slots_per_epoch());
let target_root = if block.slot == target_slot {
block_root
} else {
*state
.get_block_root(target_slot)
.map_err(Error::BeaconStateError)?
};
self.fc_store
.on_verified_block(block, block_root, state)
.map_err(Error::AfterBlockFailed)?;
// This does not apply a vote to the block; it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes.
self.proto_array.process_block(ProtoBlock {
slot: block.slot,
root: block_root,
parent_root: Some(block.parent_root),
target_root,
state_root: block.state_root,
justified_epoch: state.current_justified_checkpoint.epoch,
finalized_epoch: state.finalized_checkpoint.epoch,
})?;
Ok(())
}
/// Validates the `indexed_attestation` for application to fork choice.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#validate_on_attestation
fn validate_on_attestation(
&self,
indexed_attestation: &IndexedAttestation<E>,
) -> Result<(), InvalidAttestation> {
// There is no point in processing an attestation with an empty bitfield. Reject
// it immediately.
//
// This is not in the specification, however it should be transparent to other nodes. We
// return early here to avoid wasting precious resources verifying the rest of it.
if indexed_attestation.attesting_indices.len() == 0 {
return Err(InvalidAttestation::EmptyAggregationBitfield);
}
let slot_now = self.fc_store.get_current_slot();
let epoch_now = slot_now.epoch(E::slots_per_epoch());
let target = indexed_attestation.data.target.clone();
// Attestation must be from the current or previous epoch.
if target.epoch > epoch_now {
return Err(InvalidAttestation::FutureEpoch {
attestation_epoch: target.epoch,
current_epoch: epoch_now,
});
} else if target.epoch + 1 < epoch_now {
return Err(InvalidAttestation::PastEpoch {
attestation_epoch: target.epoch,
current_epoch: epoch_now,
});
}
if target.epoch != indexed_attestation.data.slot.epoch(E::slots_per_epoch()) {
return Err(InvalidAttestation::BadTargetEpoch {
target: target.epoch,
slot: indexed_attestation.data.slot,
});
}
// Attestation target must be for a known block.
//
// We do not delay the block for later processing to reduce complexity and DoS attack
// surface.
if !self.proto_array.contains_block(&target.root) {
return Err(InvalidAttestation::UnknownTargetRoot(target.root));
}
// Load the block for `attestation.data.beacon_block_root`.
//
// This indirectly checks to see if the `attestation.data.beacon_block_root` is in our fork
// choice. Any known, non-finalized block should be in fork choice, so this check
// immediately filters out attestations that attest to a block that has not been processed.
//
// Attestations must be for a known block. If the block is unknown, we simply drop the
// attestation and do not delay consideration for later.
let block = self
.proto_array
.get_block(&indexed_attestation.data.beacon_block_root)
.ok_or_else(|| InvalidAttestation::UnknownHeadBlock {
beacon_block_root: indexed_attestation.data.beacon_block_root,
})?;
if block.target_root != target.root {
return Err(InvalidAttestation::InvalidTarget {
attestation: target.root,
local: block.target_root,
});
}
// Attestations must not be for blocks in the future. If this is the case, the attestation
// should not be considered.
if block.slot > indexed_attestation.data.slot {
return Err(InvalidAttestation::AttestsToFutureBlock {
block: block.slot,
attestation: indexed_attestation.data.slot,
});
}
Ok(())
}
/// Register `attestation` with the fork choice DAG so that it may influence future calls to
/// `Self::get_head`.
///
/// ## Specification
///
/// Approximates:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_attestation
///
/// It only approximates the specification since it does not perform
/// `is_valid_indexed_attestation` since that should already have been called upstream and it's
/// too expensive to call again.
///
/// ## Notes:
///
/// The supplied `attestation` **must** pass the `is_valid_indexed_attestation` function as it
/// will not be run here.
pub fn on_attestation(
&mut self,
current_slot: Slot,
attestation: &IndexedAttestation<E>,
) -> Result<(), Error<T::Error>> {
// Ensure the store is up-to-date.
self.update_time(current_slot)?;
// Ignore any attestations to the zero hash.
//
// This is an edge case that results from the spec aliasing the zero hash to the genesis
// block. Attesters may attest to the zero hash if they have never seen a block.
//
// We have two options here:
//
// 1. Apply all zero-hash attestations to the genesis block.
// 2. Ignore all attestations to the zero hash.
//
// (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is
// fine because votes to the genesis block are not useful; all validators implicitly attest
// to genesis just by being present in the chain.
if attestation.data.beacon_block_root == Hash256::zero() {
return Ok(());
}
self.validate_on_attestation(attestation)?;
if attestation.data.slot < self.fc_store.get_current_slot() {
for validator_index in attestation.attesting_indices.iter() {
self.proto_array.process_attestation(
*validator_index as usize,
attestation.data.beacon_block_root,
attestation.data.target.epoch,
)?;
}
} else {
// The spec declares:
//
// ```
// Attestations can only affect the fork choice of subsequent slots.
// Delay consideration in the fork choice until their slot is in the past.
// ```
self.queued_attestations
.push(QueuedAttestation::from(attestation));
}
Ok(())
}
/// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided
/// `current_slot`. Returns the value of `self.fc_store.get_current_slot`.
pub fn update_time(&mut self, current_slot: Slot) -> Result<Slot, Error<T::Error>> {
while self.fc_store.get_current_slot() < current_slot {
let previous_slot = self.fc_store.get_current_slot();
// Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't
// get stuck in a loop.
on_tick(&mut self.fc_store, previous_slot + 1)?
}
// Process any attestations that might now be eligible.
self.process_attestation_queue()?;
Ok(self.fc_store.get_current_slot())
}
/// Processes and removes from the queue any queued attestations which may now be eligible for
/// processing due to the slot clock incrementing.
fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> {
for attestation in dequeue_attestations(
self.fc_store.get_current_slot(),
&mut self.queued_attestations,
) {
for validator_index in attestation.attesting_indices.iter() {
self.proto_array.process_attestation(
*validator_index as usize,
attestation.block_root,
attestation.target_epoch,
)?;
}
}
Ok(())
}
/// Returns `true` if the block is known.
pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.proto_array.contains_block(block_root)
}
/// Returns a `ProtoBlock` if the block is known.
pub fn get_block(&self, block_root: &Hash256) -> Option<ProtoBlock> {
self.proto_array.get_block(block_root)
}
/// Returns the latest message for a given validator, if any.
///
/// Returns `(block_root, block_slot)`.
///
/// ## Notes
///
/// It may be prudent to call `Self::update_time` before calling this function,
/// since some attestations might be queued and awaiting processing.
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
self.proto_array.latest_message(validator_index)
}
/// Returns a reference to the underlying fork choice DAG.
pub fn proto_array(&self) -> &ProtoArrayForkChoice {
&self.proto_array
}
/// Returns a reference to the underlying `fc_store`.
pub fn fc_store(&self) -> &T {
&self.fc_store
}
/// Returns a reference to the currently queued attestations.
pub fn queued_attestations(&self) -> &[QueuedAttestation] {
&self.queued_attestations
}
/// Prunes the underlying fork choice DAG.
pub fn prune(&mut self) -> Result<(), Error<T::Error>> {
let finalized_root = self.fc_store.finalized_checkpoint().root;
self.proto_array
.maybe_prune(finalized_root)
.map_err(Into::into)
}
/// Instantiate `Self` from some `PersistedForkChoice` generated by an earlier call to
/// `Self::to_persisted`.
pub fn from_persisted(
persisted: PersistedForkChoice,
fc_store: T,
) -> Result<Self, Error<T::Error>> {
let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes)
.map_err(Error::InvalidProtoArrayBytes)?;
Ok(Self {
fc_store,
proto_array,
queued_attestations: persisted.queued_attestations,
_phantom: PhantomData,
})
}
/// Takes a snapshot of `Self` and stores it in `PersistedForkChoice`, allowing this struct to
/// be instantiated again later.
pub fn to_persisted(&self) -> PersistedForkChoice {
PersistedForkChoice {
proto_array_bytes: self.proto_array().as_bytes(),
queued_attestations: self.queued_attestations().to_vec(),
}
}
}
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
///
/// This is used when persisting the state of the fork choice to disk.
#[derive(Encode, Decode, Clone)]
pub struct PersistedForkChoice {
proto_array_bytes: Vec<u8>,
queued_attestations: Vec<QueuedAttestation>,
}
#[cfg(test)]
mod tests {
use super::*;
use types::{EthSpec, MainnetEthSpec};
type E = MainnetEthSpec;
#[test]
fn slots_since_epoch_start() {
for epoch in 0..3 {
for slot in 0..E::slots_per_epoch() {
let input = epoch * E::slots_per_epoch() + slot;
assert_eq!(compute_slots_since_epoch_start::<E>(Slot::new(input)), slot)
}
}
}
#[test]
fn start_slot_at_epoch() {
for epoch in 0..3 {
assert_eq!(
compute_start_slot_at_epoch::<E>(Epoch::new(epoch)),
epoch * E::slots_per_epoch()
)
}
}
fn get_queued_attestations() -> Vec<QueuedAttestation> {
(1..4)
.into_iter()
.map(|i| QueuedAttestation {
slot: Slot::new(i),
attesting_indices: vec![],
block_root: Hash256::zero(),
target_epoch: Epoch::new(0),
})
.collect()
}
fn get_slots(queued_attestations: &[QueuedAttestation]) -> Vec<u64> {
queued_attestations.iter().map(|a| a.slot.into()).collect()
}
fn test_queued_attestations(current_time: Slot) -> (Vec<u64>, Vec<u64>) {
let mut queued = get_queued_attestations();
let dequeued = dequeue_attestations(current_time, &mut queued);
(get_slots(&queued), get_slots(&dequeued))
}
#[test]
fn dequeing_attestations() {
let (queued, dequeued) = test_queued_attestations(Slot::new(0));
assert_eq!(queued, vec![1, 2, 3]);
assert!(dequeued.is_empty());
let (queued, dequeued) = test_queued_attestations(Slot::new(1));
assert_eq!(queued, vec![1, 2, 3]);
assert!(dequeued.is_empty());
let (queued, dequeued) = test_queued_attestations(Slot::new(2));
assert_eq!(queued, vec![2, 3]);
assert_eq!(dequeued, vec![1]);
let (queued, dequeued) = test_queued_attestations(Slot::new(3));
assert_eq!(queued, vec![3]);
assert_eq!(dequeued, vec![1, 2]);
let (queued, dequeued) = test_queued_attestations(Slot::new(4));
assert!(queued.is_empty());
assert_eq!(dequeued, vec![1, 2, 3]);
}
}

View File

@ -0,0 +1,61 @@
use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot};
/// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice":
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#store
///
/// ## Detail
///
/// This is only an approximation for two reasons:
///
/// - This crate stores the actual block DAG in `ProtoArrayForkChoice`.
/// - `time` is represented using `Slot` instead of UNIX epoch `u64`.
///
/// ## Motivation
///
/// The primary motivation for defining this as a trait to be implemented upstream rather than a
/// concrete struct is to allow this crate to be free from "impure" on-disk database logic,
/// hopefully making auditing easier.
pub trait ForkChoiceStore<T: EthSpec>: Sized {
type Error;
/// Returns the last value passed to `Self::update_time`.
fn get_current_slot(&self) -> Slot;
/// Set the value to be returned by `Self::get_current_slot`.
///
/// ## Notes
///
/// This should only ever be called from within `ForkChoice::on_tick`.
fn set_current_slot(&mut self, slot: Slot);
/// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork
/// choice. Allows the implementer to perform caching or other housekeeping duties.
fn on_verified_block(
&mut self,
block: &BeaconBlock<T>,
block_root: Hash256,
state: &BeaconState<T>,
) -> Result<(), Self::Error>;
/// Returns the `justified_checkpoint`.
fn justified_checkpoint(&self) -> &Checkpoint;
/// Returns balances from the `state` identified by `justified_checkpoint.root`.
fn justified_balances(&self) -> &[u64];
/// Returns the `best_justified_checkpoint`.
fn best_justified_checkpoint(&self) -> &Checkpoint;
/// Returns the `finalized_checkpoint`.
fn finalized_checkpoint(&self) -> &Checkpoint;
/// Sets `finalized_checkpoint`.
fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint);
/// Sets the `justified_checkpoint`.
fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Self::Error>;
/// Sets the `best_justified_checkpoint`.
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint);
}
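// A minimal, purely illustrative in-memory implementor of this trait might look like the
// sketch below. The struct and field names here are hypothetical (they are not part of this
// crate); a real implementor such as Lighthouse's `BeaconForkChoiceStore` would also maintain
// the justified balances from the database rather than a plain `Vec`.
struct MemoryForkChoiceStore {
    current_slot: Slot,
    justified_checkpoint: Checkpoint,
    justified_balances: Vec<u64>,
    best_justified_checkpoint: Checkpoint,
    finalized_checkpoint: Checkpoint,
}
impl<T: EthSpec> ForkChoiceStore<T> for MemoryForkChoiceStore {
    type Error = ();
    fn get_current_slot(&self) -> Slot {
        self.current_slot
    }
    fn set_current_slot(&mut self, slot: Slot) {
        self.current_slot = slot;
    }
    fn on_verified_block(
        &mut self,
        _block: &BeaconBlock<T>,
        _block_root: Hash256,
        state: &BeaconState<T>,
    ) -> Result<(), Self::Error> {
        // A real implementation would typically cache `state` (or its balances) here so that
        // `justified_balances` can be served without a database read.
        let _ = state;
        Ok(())
    }
    fn justified_checkpoint(&self) -> &Checkpoint {
        &self.justified_checkpoint
    }
    fn justified_balances(&self) -> &[u64] {
        &self.justified_balances
    }
    fn best_justified_checkpoint(&self) -> &Checkpoint {
        &self.best_justified_checkpoint
    }
    fn finalized_checkpoint(&self) -> &Checkpoint {
        &self.finalized_checkpoint
    }
    fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
        self.finalized_checkpoint = checkpoint;
    }
    fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Self::Error> {
        self.justified_checkpoint = checkpoint;
        Ok(())
    }
    fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
        self.best_justified_checkpoint = checkpoint;
    }
}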

View File

@ -0,0 +1,8 @@
mod fork_choice;
mod fork_choice_store;
pub use crate::fork_choice::{
Error, ForkChoice, InvalidAttestation, InvalidBlock, PersistedForkChoice, QueuedAttestation,
SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
pub use fork_choice_store::ForkChoiceStore;

View File

@ -0,0 +1,812 @@
#![cfg(not(debug_assertions))]
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError,
};
use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation,
SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
use std::sync::Mutex;
use store::{MemoryStore, StoreConfig};
use types::{
test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},
Epoch, EthSpec, IndexedAttestation, MainnetEthSpec, Slot, SubnetId,
};
use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock};
pub type E = MainnetEthSpec;
pub const VALIDATOR_COUNT: usize = 16;
/// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay {
/// No delay between creation and mutation.
NoDelay,
/// Create `n` blocks before mutating the attestation.
Blocks(usize),
}
/// A helper struct to make testing fork choice more ergonomic and less repetitive.
struct ForkChoiceTest {
harness: BeaconChainHarness<HarnessType<E>>,
}
impl ForkChoiceTest {
/// Creates a new tester.
pub fn new() -> Self {
let harness = BeaconChainHarness::new_with_target_aggregators(
MainnetEthSpec,
generate_deterministic_keypairs(VALIDATOR_COUNT),
// Ensure we always have an aggregator for each slot.
u64::max_value(),
StoreConfig::default(),
);
Self { harness }
}
/// Get a value from the `ForkChoiceStore` inside the `ForkChoice` instance.
fn get<T, U>(&self, func: T) -> U
where
T: Fn(&BeaconForkChoiceStore<E, MemoryStore<E>, MemoryStore<E>>) -> U,
{
func(&self.harness.chain.fork_choice.read().fc_store())
}
/// Assert the finalized epoch matches `epoch`.
pub fn assert_finalized_epoch(self, epoch: u64) -> Self {
assert_eq!(
self.get(|fc_store| fc_store.finalized_checkpoint().epoch),
Epoch::new(epoch),
"finalized_epoch"
);
self
}
/// Assert the justified epoch matches `epoch`.
pub fn assert_justified_epoch(self, epoch: u64) -> Self {
assert_eq!(
self.get(|fc_store| fc_store.justified_checkpoint().epoch),
Epoch::new(epoch),
"justified_epoch"
);
self
}
/// Assert the best justified epoch matches `epoch`.
pub fn assert_best_justified_epoch(self, epoch: u64) -> Self {
assert_eq!(
self.get(|fc_store| fc_store.best_justified_checkpoint().epoch),
Epoch::new(epoch),
"best_justified_epoch"
);
self
}
/// Inspect the queued attestations in fork choice.
pub fn inspect_queued_attestations<F>(self, mut func: F) -> Self
where
F: FnMut(&[QueuedAttestation]),
{
self.harness
.chain
.fork_choice
.write()
.update_time(self.harness.chain.slot().unwrap())
.unwrap();
func(self.harness.chain.fork_choice.read().queued_attestations());
self
}
/// Skip a slot, without producing a block.
pub fn skip_slot(self) -> Self {
self.harness.advance_slot();
self
}
/// Build the chain whilst `predicate` returns `true`.
pub fn apply_blocks_while<F>(self, mut predicate: F) -> Self
where
F: FnMut(&BeaconBlock<E>, &BeaconState<E>) -> bool,
{
self.harness.advance_slot();
self.harness.extend_chain_while(
|block, state| predicate(&block.message, state),
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
self
}
/// Apply `count` blocks to the chain (with attestations).
pub fn apply_blocks(self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
self
}
/// Apply `count` blocks to the chain (without attestations).
pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(vec![]),
);
self
}
/// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range.
///
/// If the chain is presently in an unsafe period, transition through it and the following safe
/// period.
pub fn move_to_next_unsafe_period(self) -> Self {
self.move_inside_safe_to_update()
.move_outside_safe_to_update()
}
/// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range.
pub fn move_outside_safe_to_update(self) -> Self {
while is_safe_to_update(self.harness.chain.slot().unwrap()) {
self.harness.advance_slot()
}
self
}
/// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range.
pub fn move_inside_safe_to_update(self) -> Self {
while !is_safe_to_update(self.harness.chain.slot().unwrap()) {
self.harness.advance_slot()
}
self
}
/// Applies a block directly to fork choice, bypassing the beacon chain.
///
/// Asserts the block was applied successfully.
pub fn apply_block_directly_to_fork_choice<F>(self, mut func: F) -> Self
where
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
{
let (mut block, mut state) = self.harness.get_block();
func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
self.harness
.chain
.fork_choice
.write()
.on_block(current_slot, &block.message, block.canonical_root(), &state)
.unwrap();
self
}
/// Applies a block directly to fork choice, bypassing the beacon chain.
///
/// Asserts that an error occurred and allows inspecting it via `comparison_func`.
pub fn apply_invalid_block_directly_to_fork_choice<F, G>(
self,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
where
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
G: FnMut(ForkChoiceError),
{
let (mut block, mut state) = self.harness.get_block();
mutation_func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
let err = self
.harness
.chain
.fork_choice
.write()
.on_block(current_slot, &block.message, block.canonical_root(), &state)
.err()
.expect("on_block did not return an error");
comparison_func(err);
self
}
/// Compares the justified balances in the `ForkChoiceStore` versus a direct lookup from the
/// database.
fn check_justified_balances(&self) {
let harness = &self.harness;
let fc = self.harness.chain.fork_choice.read();
let state_root = harness
.chain
.store
.get_item::<SignedBeaconBlock<E>>(&fc.fc_store().justified_checkpoint().root)
.unwrap()
.unwrap()
.message
.state_root;
let state = harness
.chain
.store
.get_state(&state_root, None)
.unwrap()
.unwrap();
let balances = state
.validators
.into_iter()
.map(|v| {
if v.is_active_at(state.current_epoch()) {
v.effective_balance
} else {
0
}
})
.collect::<Vec<_>>();
assert_eq!(
&balances[..],
fc.fc_store().justified_balances(),
"balances should match"
)
}
/// Produces and signs an attestation for the current slot, applies `mutation_func` to it
/// (optionally after extending the chain as per `delay`), then applies it to fork choice and
/// passes the result to `comparison_func`.
fn apply_attestation_to_chain<F, G>(
self,
delay: MutationDelay,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
where
F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<HarnessType<E>>),
G: FnMut(Result<(), BeaconChainError>),
{
let chain = &self.harness.chain;
let head = chain.head().expect("should get head");
let current_slot = chain.slot().expect("should get slot");
let mut attestation = chain
.produce_unaggregated_attestation(current_slot, 0)
.expect("should not error while producing attestation");
let validator_committee_index = 0;
let validator_index = *head
.beacon_state
.get_beacon_committee(current_slot, attestation.data.index)
.expect("should get committees")
.committee
.get(validator_committee_index)
.expect("there should be an attesting validator");
let committee_count = head
.beacon_state
.get_committee_count_at_slot(current_slot)
.expect("should not error while getting committee count");
let subnet_id =
SubnetId::compute_subnet::<E>(current_slot, 0, committee_count, &chain.spec)
.expect("should compute subnet id");
let validator_sk = generate_deterministic_keypair(validator_index).sk;
attestation
.sign(
&validator_sk,
validator_committee_index,
&head.beacon_state.fork,
chain.genesis_validators_root,
&chain.spec,
)
.expect("should sign attestation");
let mut verified_attestation = chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
.expect("precondition: should gossip verify attestation");
if let MutationDelay::Blocks(slots) = delay {
self.harness.advance_slot();
self.harness.extend_chain(
slots,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::SomeValidators(vec![]),
);
}
mutation_func(verified_attestation.__indexed_attestation_mut(), chain);
let result = chain.apply_attestation_to_fork_choice(&verified_attestation);
comparison_func(result);
self
}
}
fn is_safe_to_update(slot: Slot) -> bool {
slot % E::slots_per_epoch() < SAFE_SLOTS_TO_UPDATE_JUSTIFIED
}
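// A small worked example of the boundary this helper draws, assuming the spec v0.12 value of
// SAFE_SLOTS_TO_UPDATE_JUSTIFIED (8) and 32 slots per epoch for `MainnetEthSpec`. It is only
// a sanity-check sketch, not part of the original test suite.
#[test]
fn is_safe_to_update_epoch_boundary_sketch() {
    // Slots 0..8 of each epoch are "safe" to immediately update the justified checkpoint.
    assert!(is_safe_to_update(Slot::new(0)));
    assert!(is_safe_to_update(Slot::new(7)));
    // From slot 8 until the end of the epoch we are outside the safe window.
    assert!(!is_safe_to_update(Slot::new(8)));
    assert!(!is_safe_to_update(Slot::new(31)));
    // The window re-opens at the first slot of the next epoch.
    assert!(is_safe_to_update(Slot::new(32)));
}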
/// - The new justified checkpoint descends from the current.
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`
#[test]
fn justified_checkpoint_updates_with_descendent_inside_safe_slots() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.move_inside_safe_to_update()
.assert_justified_epoch(0)
.apply_blocks(1)
.assert_justified_epoch(2);
}
/// - The new justified checkpoint descends from the current.
/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`
/// - This is **not** the first justification since genesis
#[test]
fn justified_checkpoint_updates_with_descendent_outside_safe_slots() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch <= 2)
.move_outside_safe_to_update()
.assert_justified_epoch(2)
.assert_best_justified_epoch(2)
.apply_blocks(1)
.assert_justified_epoch(3);
}
/// - The new justified checkpoint descends from the current.
/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`
/// - This is the first justification since genesis
#[test]
fn justified_checkpoint_updates_first_justification_outside_safe_to_update() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.move_to_next_unsafe_period()
.assert_justified_epoch(0)
.assert_best_justified_epoch(0)
.apply_blocks(1)
.assert_justified_epoch(2)
.assert_best_justified_epoch(2);
}
/// - The new justified checkpoint **does not** descend from the current.
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`
/// - Finalized epoch has **not** increased.
#[test]
fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks(1)
.move_inside_safe_to_update()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should not change.
state.finalized_checkpoint.epoch = Epoch::new(0);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
.assert_justified_epoch(3)
.assert_best_justified_epoch(3);
}
/// - The new justified checkpoint **does not** descend from the current.
/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`.
/// - Finalized epoch has **not** increased.
#[test]
fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks(1)
.move_to_next_unsafe_period()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should not change.
state.finalized_checkpoint.epoch = Epoch::new(0);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
.assert_justified_epoch(2)
.assert_best_justified_epoch(3);
}
/// - The new justified checkpoint **does not** descend from the current.
/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`
/// - Finalized epoch has increased.
#[test]
fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks(1)
.move_to_next_unsafe_period()
.assert_justified_epoch(2)
.apply_block_directly_to_fork_choice(|_, state| {
// The finalized checkpoint should change.
state.finalized_checkpoint.epoch = Epoch::new(1);
// The justified checkpoint has changed.
state.current_justified_checkpoint.epoch = Epoch::new(3);
// The new block should **not** include the current justified block as an ancestor.
state.current_justified_checkpoint.root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
})
.assert_justified_epoch(3)
.assert_best_justified_epoch(3);
}
/// Check that the balances are obtained correctly.
#[test]
fn justified_balances() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0)
.apply_blocks(1)
.assert_justified_epoch(2)
.check_justified_balances()
}
macro_rules! assert_invalid_block {
($err: tt, $($error: pat) |+ $( if $guard: expr )?) => {
assert!(
matches!(
$err,
$( ForkChoiceError::InvalidBlock($error) ) |+ $( if $guard )?
),
);
};
}
/// Specification v0.12.1
///
/// assert block.parent_root in store.block_states
#[test]
fn invalid_block_unknown_parent() {
let junk = Hash256::from_low_u64_be(42);
ForkChoiceTest::new()
.apply_blocks(2)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.parent_root = junk;
},
|err| {
assert_invalid_block!(
err,
InvalidBlock::UnknownParent(parent)
if parent == junk
)
},
);
}
/// Specification v0.12.1
///
/// assert get_current_slot(store) >= block.slot
#[test]
fn invalid_block_future_slot() {
ForkChoiceTest::new()
.apply_blocks(2)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.slot = block.slot + 1;
},
|err| {
assert_invalid_block!(
err,
InvalidBlock::FutureSlot { .. }
)
},
);
}
/// Specification v0.12.1
///
/// assert block.slot > finalized_slot
#[test]
fn invalid_block_finalized_slot() {
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks(1)
.apply_invalid_block_directly_to_fork_choice(
|block, _| {
block.slot = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1;
},
|err| {
assert_invalid_block!(
err,
InvalidBlock::FinalizedSlot { finalized_slot, .. }
if finalized_slot == Epoch::new(2).start_slot(E::slots_per_epoch())
)
},
);
}
/// Specification v0.12.1
///
/// assert get_ancestor(store, hash_tree_root(block), finalized_slot) ==
/// store.finalized_checkpoint.root
///
/// Note: we technically don't do this exact check, but an equivalent check. Reference:
///
/// https://github.com/ethereum/eth2.0-specs/pull/1884
#[test]
fn invalid_block_finalized_descendant() {
let invalid_ancestor = Mutex::new(Hash256::zero());
ForkChoiceTest::new()
.apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0)
.apply_blocks(1)
.assert_finalized_epoch(2)
.apply_invalid_block_directly_to_fork_choice(
|block, state| {
block.parent_root = *state
.get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch()))
.unwrap();
*invalid_ancestor.lock().unwrap() = block.parent_root;
},
|err| {
assert_invalid_block!(
err,
InvalidBlock::NotFinalizedDescendant { block_ancestor, .. }
if block_ancestor == Some(*invalid_ancestor.lock().unwrap())
)
},
);
}
macro_rules! assert_invalid_attestation {
($err: tt, $($error: pat) |+ $( if $guard: expr )?) => {
assert!(
matches!(
$err,
$( Err(BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation($error))) ) |+ $( if $guard )?
),
"{:?}",
$err
);
};
}
/// Ensure we can process a valid attestation.
#[test]
fn valid_attestation() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|_, _| {},
|result| assert_eq!(result.unwrap(), ()),
);
}
/// This test is not in the specification; however, we reject an attestation with an empty
/// aggregation bitfield since it has no purpose beyond wasting our time.
#[test]
fn invalid_attestation_empty_bitfield() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.attesting_indices = vec![].into();
},
|result| {
assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield)
},
);
}
/// Specification v0.12.1:
///
/// assert target.epoch in [expected_current_epoch, previous_epoch]
///
/// (tests epoch after current epoch)
#[test]
fn invalid_attestation_future_epoch() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.data.target.epoch = Epoch::new(2);
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::FutureEpoch { attestation_epoch, current_epoch }
if attestation_epoch == Epoch::new(2) && current_epoch == Epoch::new(0)
)
},
);
}
/// Specification v0.12.1:
///
/// assert target.epoch in [expected_current_epoch, previous_epoch]
///
/// (tests epoch prior to previous epoch)
#[test]
fn invalid_attestation_past_epoch() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(E::slots_per_epoch() as usize * 3 + 1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.data.target.epoch = Epoch::new(0);
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::PastEpoch { attestation_epoch, current_epoch }
if attestation_epoch == Epoch::new(0) && current_epoch == Epoch::new(3)
)
},
);
}
/// Specification v0.12.1:
///
/// assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
#[test]
fn invalid_attestation_target_epoch() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(E::slots_per_epoch() as usize + 1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.data.slot = Slot::new(1);
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::BadTargetEpoch { target, slot }
if target == Epoch::new(1) && slot == Slot::new(1)
)
},
);
}
/// Specification v0.12.1:
///
/// assert target.root in store.blocks
#[test]
fn invalid_attestation_unknown_target_root() {
let junk = Hash256::from_low_u64_be(42);
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.data.target.root = junk;
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::UnknownTargetRoot(root)
if root == junk
)
},
);
}
/// Specification v0.12.1:
///
/// assert attestation.data.beacon_block_root in store.blocks
#[test]
fn invalid_attestation_unknown_beacon_block_root() {
let junk = Hash256::from_low_u64_be(42);
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, _| {
attestation.data.beacon_block_root = junk;
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::UnknownHeadBlock { beacon_block_root }
if beacon_block_root == junk
)
},
);
}
/// Specification v0.12.1:
///
/// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
#[test]
fn invalid_attestation_future_block() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::Blocks(1),
|attestation, chain| {
attestation.data.beacon_block_root = chain
.block_at_slot(chain.slot().unwrap())
.unwrap()
.unwrap()
.canonical_root();
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::AttestsToFutureBlock { block, attestation }
if block == 2 && attestation == 1
)
},
);
}
/// Specification v0.12.1:
///
/// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
#[test]
fn invalid_attestation_inconsistent_ffg_vote() {
let local_opt = Mutex::new(None);
let attestation_opt = Mutex::new(None);
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|attestation, chain| {
attestation.data.target.root = chain
.block_at_slot(Slot::new(1))
.unwrap()
.unwrap()
.canonical_root();
*attestation_opt.lock().unwrap() = Some(attestation.data.target.root);
*local_opt.lock().unwrap() = Some(
chain
.block_at_slot(Slot::new(0))
.unwrap()
.unwrap()
.canonical_root(),
);
},
|result| {
assert_invalid_attestation!(
result,
InvalidAttestation::InvalidTarget { attestation, local }
if attestation == attestation_opt.lock().unwrap().unwrap()
&& local == local_opt.lock().unwrap().unwrap()
)
},
);
}
/// Specification v0.12.1:
///
/// assert get_current_slot(store) >= attestation.data.slot + 1
#[test]
fn invalid_attestation_delayed_slot() {
ForkChoiceTest::new()
.apply_blocks_without_new_attestations(1)
.inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0))
.apply_attestation_to_chain(
MutationDelay::NoDelay,
|_, _| {},
|result| assert_eq!(result.unwrap(), ()),
)
.inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1))
.skip_slot()
.inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0));
}

View File

@ -1,15 +1,14 @@
[package]
name = "proto_array_fork_choice"
name = "proto_array"
version = "0.2.0"
authors = ["Paul Hauner <paul@sigmaprime.io>"]
edition = "2018"
[[bin]]
name = "proto_array_fork_choice"
name = "proto_array"
path = "src/bin.rs"
[dependencies]
parking_lot = "0.10.2"
types = { path = "../types" }
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"

View File

@ -1,4 +1,4 @@
use proto_array_fork_choice::fork_choice_test_definition::*;
use proto_array::fork_choice_test_definition::*;
use serde_yaml;
use std::fs::File;

View File

@ -2,7 +2,7 @@ mod ffg_updates;
mod no_votes;
mod votes;
use crate::proto_array_fork_choice::ProtoArrayForkChoice;
use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
use serde_derive::{Deserialize, Serialize};
use types::{Epoch, Hash256, Slot};
@ -55,7 +55,7 @@ pub struct ForkChoiceTestDefinition {
impl ForkChoiceTestDefinition {
pub fn run(self) {
let fork_choice = ProtoArrayForkChoice::new(
let mut fork_choice = ProtoArrayForkChoice::new(
self.finalized_block_slot,
Hash256::zero(),
self.justified_epoch,
@ -119,18 +119,21 @@ impl ForkChoiceTestDefinition {
justified_epoch,
finalized_epoch,
} => {
fork_choice
.process_block(
slot,
root,
parent_root,
Hash256::zero(),
justified_epoch,
finalized_epoch,
let block = Block {
slot,
root,
parent_root: Some(parent_root),
state_root: Hash256::zero(),
target_root: Hash256::zero(),
justified_epoch,
finalized_epoch,
};
fork_choice.process_block(block).unwrap_or_else(|e| {
panic!(
"process_block op at index {} returned error: {:?}",
op_index, e
)
.unwrap_or_else(|_| {
panic!("process_block op at index {} returned error", op_index)
});
});
check_bytes_round_trip(&fork_choice);
}
Operation::ProcessAttestation {

View File

@ -4,7 +4,7 @@ mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
pub use crate::proto_array_fork_choice::ProtoArrayForkChoice;
pub use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
pub use error::Error;
pub mod core {

View File

@ -1,4 +1,4 @@
use crate::error::Error;
use crate::{error::Error, Block};
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
@ -12,10 +12,16 @@ pub struct ProtoNode {
/// The `state_root` is not necessary for `ProtoArray` either, it also just exists for upstream
/// components (namely attestation verification).
pub state_root: Hash256,
root: Hash256,
parent: Option<usize>,
justified_epoch: Epoch,
finalized_epoch: Epoch,
/// The root that would be used for the `attestation.data.target.root` if a LMD vote was cast
/// for this block.
///
/// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream
/// components (namely fork choice attestation verification).
pub target_root: Hash256,
pub root: Hash256,
pub parent: Option<usize>,
pub justified_epoch: Epoch,
pub finalized_epoch: Epoch,
weight: u64,
best_child: Option<usize>,
best_descendant: Option<usize>,
@ -124,29 +130,24 @@ impl ProtoArray {
/// Register a block with the fork choice.
///
/// It is only sane to supply a `None` parent for the genesis block.
pub fn on_block(
&mut self,
slot: Slot,
root: Hash256,
parent_opt: Option<Hash256>,
state_root: Hash256,
justified_epoch: Epoch,
finalized_epoch: Epoch,
) -> Result<(), Error> {
pub fn on_block(&mut self, block: Block) -> Result<(), Error> {
// If the block is already known, simply ignore it.
if self.indices.contains_key(&root) {
if self.indices.contains_key(&block.root) {
return Ok(());
}
let node_index = self.nodes.len();
let node = ProtoNode {
slot,
state_root,
root,
parent: parent_opt.and_then(|parent| self.indices.get(&parent).copied()),
justified_epoch,
finalized_epoch,
slot: block.slot,
root: block.root,
target_root: block.target_root,
state_root: block.state_root,
parent: block
.parent_root
.and_then(|parent| self.indices.get(&parent).copied()),
justified_epoch: block.justified_epoch,
finalized_epoch: block.finalized_epoch,
weight: 0,
best_child: None,
best_descendant: None,

View File

@ -1,11 +1,9 @@
use crate::error::Error;
use crate::proto_array::ProtoArray;
use crate::ssz_container::SszContainer;
use parking_lot::{RwLock, RwLockReadGuard};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use std::ptr;
use types::{Epoch, Hash256, Slot};
pub const DEFAULT_PRUNE_THRESHOLD: usize = 256;
@ -17,6 +15,19 @@ pub struct VoteTracker {
next_epoch: Epoch,
}
/// A block that is to be applied to the fork choice.
///
/// A simplified version of `types::BeaconBlock`.
pub struct Block {
pub slot: Slot,
pub root: Hash256,
pub parent_root: Option<Hash256>,
pub state_root: Hash256,
pub target_root: Hash256,
pub justified_epoch: Epoch,
pub finalized_epoch: Epoch,
}
/// A Vec-wrapper which will grow to match any request.
///
/// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using
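// The implementation elided by the following hunk boils down to something like this sketch;
// it is an inferred outline of the grow-on-demand behaviour described above, not the crate's
// exact code.
pub struct ElasticListSketch<T>(pub Vec<T>);
impl<T: Default> ElasticListSketch<T> {
    // Grow the underlying Vec with `T::default()` until index `i` exists.
    fn ensure(&mut self, i: usize) {
        if self.0.len() <= i {
            self.0.resize_with(i + 1, T::default);
        }
    }
    // An out-of-bounds `get_mut` grows the list instead of panicking.
    pub fn get_mut(&mut self, i: usize) -> &mut T {
        self.ensure(i);
        &mut self.0[i]
    }
}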
@ -44,21 +55,11 @@ where
}
}
#[derive(PartialEq)]
pub struct ProtoArrayForkChoice {
pub(crate) proto_array: RwLock<ProtoArray>,
pub(crate) votes: RwLock<ElasticList<VoteTracker>>,
pub(crate) balances: RwLock<Vec<u64>>,
}
impl PartialEq for ProtoArrayForkChoice {
fn eq(&self, other: &Self) -> bool {
if ptr::eq(self, other) {
return true;
}
*self.proto_array.read() == *other.proto_array.read()
&& *self.votes.read() == *other.votes.read()
&& *self.balances.read() == *other.balances.read()
}
pub(crate) proto_array: ProtoArray,
pub(crate) votes: ElasticList<VoteTracker>,
pub(crate) balances: Vec<u64>,
}
impl ProtoArrayForkChoice {
@ -77,32 +78,36 @@ impl ProtoArrayForkChoice {
indices: HashMap::with_capacity(1),
};
let block = Block {
slot: finalized_block_slot,
root: finalized_root,
parent_root: None,
state_root: finalized_block_state_root,
// We are using the finalized_root as the target_root, since it always lies on an
// epoch boundary.
target_root: finalized_root,
justified_epoch,
finalized_epoch,
};
proto_array
.on_block(
finalized_block_slot,
finalized_root,
None,
finalized_block_state_root,
justified_epoch,
finalized_epoch,
)
.on_block(block)
.map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;
Ok(Self {
proto_array: RwLock::new(proto_array),
votes: RwLock::new(ElasticList::default()),
balances: RwLock::new(vec![]),
proto_array,
votes: ElasticList::default(),
balances: vec![],
})
}
pub fn process_attestation(
&self,
&mut self,
validator_index: usize,
block_root: Hash256,
target_epoch: Epoch,
) -> Result<(), String> {
let mut votes = self.votes.write();
let vote = votes.get_mut(validator_index);
let vote = self.votes.get_mut(validator_index);
if target_epoch > vote.next_epoch || *vote == VoteTracker::default() {
vote.next_root = block_root;
@ -112,102 +117,86 @@ impl ProtoArrayForkChoice {
Ok(())
}
pub fn process_block(
&self,
slot: Slot,
block_root: Hash256,
parent_root: Hash256,
state_root: Hash256,
justified_epoch: Epoch,
finalized_epoch: Epoch,
) -> Result<(), String> {
pub fn process_block(&mut self, block: Block) -> Result<(), String> {
if block.parent_root.is_none() {
return Err("Missing parent root".to_string());
}
self.proto_array
.write()
.on_block(
slot,
block_root,
Some(parent_root),
state_root,
justified_epoch,
finalized_epoch,
)
.on_block(block)
.map_err(|e| format!("process_block_error: {:?}", e))
}
pub fn find_head(
&self,
&mut self,
justified_epoch: Epoch,
justified_root: Hash256,
finalized_epoch: Epoch,
justified_state_balances: &[u64],
) -> Result<Hash256, String> {
let mut proto_array = self.proto_array.write();
let mut votes = self.votes.write();
let mut old_balances = self.balances.write();
let old_balances = &mut self.balances;
let new_balances = justified_state_balances;
let deltas = compute_deltas(
&proto_array.indices,
&mut votes,
&self.proto_array.indices,
&mut self.votes,
&old_balances,
&new_balances,
)
.map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;
proto_array
self.proto_array
.apply_score_changes(deltas, justified_epoch, finalized_epoch)
.map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;
*old_balances = new_balances.to_vec();
proto_array
self.proto_array
.find_head(&justified_root)
.map_err(|e| format!("find_head failed: {:?}", e))
}
pub fn maybe_prune(&self, finalized_root: Hash256) -> Result<(), String> {
pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), String> {
self.proto_array
.write()
.maybe_prune(finalized_root)
.map_err(|e| format!("find_head maybe_prune failed: {:?}", e))
}
pub fn set_prune_threshold(&self, prune_threshold: usize) {
self.proto_array.write().prune_threshold = prune_threshold;
pub fn set_prune_threshold(&mut self, prune_threshold: usize) {
self.proto_array.prune_threshold = prune_threshold;
}
pub fn len(&self) -> usize {
self.proto_array.read().nodes.len()
self.proto_array.nodes.len()
}
pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.proto_array.read().indices.contains_key(block_root)
self.proto_array.indices.contains_key(block_root)
}
pub fn block_slot(&self, block_root: &Hash256) -> Option<Slot> {
let proto_array = self.proto_array.read();
pub fn get_block(&self, block_root: &Hash256) -> Option<Block> {
let block_index = self.proto_array.indices.get(block_root)?;
let block = self.proto_array.nodes.get(*block_index)?;
let parent_root = block
.parent
.and_then(|i| self.proto_array.nodes.get(i))
.map(|parent| parent.root);
let i = proto_array.indices.get(block_root)?;
let block = proto_array.nodes.get(*i)?;
Some(block.slot)
}
pub fn block_slot_and_state_root(&self, block_root: &Hash256) -> Option<(Slot, Hash256)> {
let proto_array = self.proto_array.read();
let i = proto_array.indices.get(block_root)?;
let block = proto_array.nodes.get(*i)?;
Some((block.slot, block.state_root))
Some(Block {
slot: block.slot,
root: block.root,
parent_root,
state_root: block.state_root,
target_root: block.target_root,
justified_epoch: block.justified_epoch,
finalized_epoch: block.finalized_epoch,
})
}
pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> {
let votes = self.votes.read();
if validator_index < votes.0.len() {
let vote = &votes.0[validator_index];
if validator_index < self.votes.0.len() {
let vote = &self.votes.0[validator_index];
if *vote == VoteTracker::default() {
None
@ -232,8 +221,8 @@ impl ProtoArrayForkChoice {
/// Returns a reference to the core `ProtoArray` struct.
///
/// Should only be used when encoding/decoding during troubleshooting.
pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
self.proto_array.read()
pub fn core_proto_array(&self) -> &ProtoArray {
&self.proto_array
}
}

Some files were not shown because too many files have changed in this diff.