From 8f4a2fbde1cb53a90d98c17f7abbf3e363246d02 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 6 Mar 2019 14:46:12 +1100 Subject: [PATCH 01/33] Implement transaction pool basics --- Cargo.toml | 1 + eth2/operation_pool/Cargo.toml | 9 + eth2/operation_pool/src/lib.rs | 308 ++++++++++++++++++ .../src/per_block_processing.rs | 11 +- .../src/per_block_processing/errors.rs | 5 + .../per_block_processing/verify_deposit.rs | 6 +- .../src/per_block_processing/verify_exit.rs | 8 +- .../per_block_processing/verify_transfer.rs | 38 ++- eth2/types/Cargo.toml | 1 + eth2/types/src/transfer.rs | 5 +- 10 files changed, 377 insertions(+), 15 deletions(-) create mode 100644 eth2/operation_pool/Cargo.toml create mode 100644 eth2/operation_pool/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index cb070cc2d..00be99e32 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ members = [ "eth2/attester", "eth2/block_proposer", "eth2/fork_choice", + "eth2/operation_pool", "eth2/state_processing", "eth2/types", "eth2/utils/bls", diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml new file mode 100644 index 000000000..e68d318b5 --- /dev/null +++ b/eth2/operation_pool/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "operation_pool" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +[dependencies] +types = { path = "../types" } +state_processing = { path = "../../eth2/state_processing" } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs new file mode 100644 index 000000000..7c2525099 --- /dev/null +++ b/eth2/operation_pool/src/lib.rs @@ -0,0 +1,308 @@ +use std::collections::{btree_map::Entry, BTreeMap, HashSet}; + +use state_processing::per_block_processing::{ + verify_deposit_merkle_proof, verify_exit, verify_proposer_slashing, verify_transfer, + verify_transfer_partial, +}; +use types::{ + AttesterSlashing, BeaconState, ChainSpec, Deposit, ProposerSlashing, Transfer, VoluntaryExit, +}; + +#[cfg(test)] +const VERIFY_DEPOSIT_PROOFS: bool = false; +#[cfg(not(test))] +const VERIFY_DEPOSIT_PROOFS: bool = true; + +#[derive(Default)] +pub struct OperationPool { + /// Map from deposit index to deposit data. + // NOTE: We assume that there is only one deposit per index + // because the Eth1 data is updated (at most) once per epoch, + // and the spec doesn't seem to accomodate for re-orgs on a time-frame + // longer than an epoch + deposits: BTreeMap, + /// Map from attester index to slashing. + attester_slashings: BTreeMap, + /// Map from proposer index to slashing. + proposer_slashings: BTreeMap, + /// Map from exiting validator to their exit data. + voluntary_exits: BTreeMap, + /// Set of transfers. + transfers: HashSet, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum DepositInsertStatus { + /// The deposit was not already in the pool. + Fresh, + /// The deposit already existed in the pool. + Duplicate, + /// The deposit conflicted with an existing deposit, which was replaced. + Replaced(Deposit), +} + +impl OperationPool { + /// Create a new operation pool. + pub fn new() -> Self { + Self::default() + } + + /// Add a deposit to the pool. + /// + /// No two distinct deposits should be added with the same index. 
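+ /// The caller can distinguish fresh, duplicate and replacing insertions via the returned `DepositInsertStatus`.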
+ pub fn insert_deposit(&mut self, deposit: Deposit) -> DepositInsertStatus { + use DepositInsertStatus::*; + + match self.deposits.entry(deposit.index) { + Entry::Vacant(entry) => { + entry.insert(deposit); + Fresh + } + Entry::Occupied(mut entry) => { + if entry.get() == &deposit { + Duplicate + } else { + Replaced(entry.insert(deposit)) + } + } + } + } + + /// Get an ordered list of deposits for inclusion in a block. + /// + /// Take at most the maximum number of deposits, beginning from the current deposit index. + pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + let start_idx = state.deposit_index; + (start_idx..start_idx + spec.max_deposits) + .map(|idx| self.deposits.get(&idx)) + .take_while(|deposit| { + // NOTE: we don't use verify_deposit, because it requires the + // deposit's index to match the state's, and we would like to return + // a batch with increasing indices + deposit.map_or(false, |deposit| { + !VERIFY_DEPOSIT_PROOFS || verify_deposit_merkle_proof(state, deposit, spec) + }) + }) + .flatten() + .cloned() + .collect() + } + + /// Remove all deposits with index less than the deposit index of the latest finalised block. + pub fn prune_deposits(&mut self, state: &BeaconState) -> BTreeMap { + let deposits_keep = self.deposits.split_off(&state.deposit_index); + std::mem::replace(&mut self.deposits, deposits_keep) + } + + /// Insert a proposer slashing into the pool. + pub fn insert_proposer_slashing( + &mut self, + slashing: ProposerSlashing, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), ()> { + // TODO: should maybe insert anyway if the proposer is unknown in the validator index, + // because they could *become* known later + // FIXME: error handling + verify_proposer_slashing(&slashing, state, spec).map_err(|_| ())?; + self.proposer_slashings + .insert(slashing.proposer_index, slashing); + Ok(()) + } + + /// Only check whether the implicated validator has already been slashed, because + /// all slashings in the pool were validated upon insertion. + // TODO: we need a mechanism to avoid including a proposer slashing and an attester + // slashing for the same validator in the same block + pub fn get_proposer_slashings( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + // We sort by validator index, which is safe, because a validator can only supply + // so many valid slashings for lower-indexed validators (and even that is unlikely) + filter_limit_operations( + self.proposer_slashings.values(), + |slashing| { + state + .validator_registry + .get(slashing.proposer_index as usize) + .map_or(false, |validator| !validator.slashed) + }, + spec.max_proposer_slashings, + ) + } + + /// Prune slashings for all slashed or withdrawn validators. + pub fn prune_proposer_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + let to_prune = self + .proposer_slashings + .keys() + .flat_map(|&validator_index| { + finalized_state + .validator_registry + .get(validator_index as usize) + .filter(|validator| { + validator.slashed + || validator.is_withdrawable_at(finalized_state.current_epoch(spec)) + }) + .map(|_| validator_index) + }) + .collect::>(); + + for validator_index in to_prune { + self.proposer_slashings.remove(&validator_index); + } + } + + // TODO: copy ProposerSlashing code for AttesterSlashing + + /// Insert a voluntary exit, validating it almost-entirely (future exits are permitted). 
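+ /// The exit-epoch check is skipped here, so an exit scheduled for a future epoch can be stored now and included once it becomes valid.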
+ pub fn insert_voluntary_exit( + &mut self, + exit: VoluntaryExit, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), ()> { + verify_exit(state, &exit, spec, false).map_err(|_| ())?; + self.voluntary_exits.insert(exit.validator_index, exit); + Ok(()) + } + + /// Get a list of voluntary exits for inclusion in a block. + // TODO: could optimise this by eliding the checks that have already been done on insert + pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + filter_limit_operations( + self.voluntary_exits.values(), + |exit| verify_exit(state, exit, spec, true).is_ok(), + spec.max_voluntary_exits, + ) + } + + /// Prune if validator has already exited at the last finalized state. + pub fn prune_voluntary_exits(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + let to_prune = self + .voluntary_exits + .keys() + .flat_map(|&validator_index| { + finalized_state + .validator_registry + .get(validator_index as usize) + .filter(|validator| validator.is_exited_at(finalized_state.current_epoch(spec))) + .map(|_| validator_index) + }) + .collect::>(); + + for validator_index in to_prune { + self.voluntary_exits.remove(&validator_index); + } + } + + /// Insert a transfer into the pool, checking it for validity in the process. + pub fn insert_transfer( + &mut self, + transfer: Transfer, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), ()> { + // The signature of the transfer isn't hashed, but because we check + // it before we insert into the HashSet, we can't end up with duplicate + // transactions. + verify_transfer_partial(state, &transfer, spec, true).map_err(|_| ())?; + self.transfers.insert(transfer); + Ok(()) + } + + /// Get a list of transfers for inclusion in a block. + // TODO: improve the economic optimality of this function by taking the transfer + // fees into account, and dependencies between transfers in the same block + // e.g. A pays B, B pays C + pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + filter_limit_operations( + &self.transfers, + |transfer| verify_transfer(state, transfer, spec).is_ok(), + spec.max_transfers, + ) + } + + /// Prune the set of transfers by removing all those whose slot has already passed. + pub fn prune_transfers(&mut self, finalized_state: &BeaconState) { + self.transfers = self + .transfers + .drain() + .filter(|transfer| transfer.slot > finalized_state.slot) + .collect(); + } +} + +/// Filter up to a maximum number of operations out of a slice. 
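+/// At most `limit` operations satisfying `filter` are cloned into the returned vector.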
+fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec +where + I: IntoIterator, + F: Fn(&T) -> bool, + T: Clone, +{ + operations + .into_iter() + .filter(|x| filter(*x)) + .take(limit as usize) + .cloned() + .collect() +} + +#[cfg(test)] +mod tests { + use super::DepositInsertStatus::*; + use super::*; + use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + + #[test] + fn insert_deposit() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let mut op_pool = OperationPool::new(); + let deposit1 = Deposit::random_for_test(&mut rng); + let mut deposit2 = Deposit::random_for_test(&mut rng); + deposit2.index = deposit1.index; + + assert_eq!(op_pool.insert_deposit(deposit1.clone()), Fresh); + assert_eq!(op_pool.insert_deposit(deposit1.clone()), Duplicate); + assert_eq!(op_pool.insert_deposit(deposit2), Replaced(deposit1)); + } + + #[test] + fn get_deposits_max() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let mut op_pool = OperationPool::new(); + let spec = ChainSpec::foundation(); + let start = 10000; + let max_deposits = spec.max_deposits; + let extra = 5; + let offset = 1; + assert!(offset <= extra); + + let proto_deposit = Deposit::random_for_test(&mut rng); + let deposits = (start..start + max_deposits + extra) + .map(|index| { + let mut deposit = proto_deposit.clone(); + deposit.index = index; + deposit + }) + .collect::>(); + + for deposit in &deposits { + assert_eq!(op_pool.insert_deposit(deposit.clone()), Fresh); + } + + let mut state = BeaconState::random_for_test(&mut rng); + state.deposit_index = start + offset; + let deposits_for_block = op_pool.get_deposits(&state, &spec); + + assert_eq!(deposits_for_block.len() as u64, max_deposits); + assert_eq!( + deposits_for_block[..], + deposits[offset as usize..(offset + max_deposits) as usize] + ); + } + + // TODO: more tests +} diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index dc83abb3f..e0e359552 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,4 +1,3 @@ -use self::verify_proposer_slashing::verify_proposer_slashing; use crate::common::slash_validator; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use rayon::prelude::*; @@ -8,11 +7,15 @@ use types::*; pub use self::verify_attester_slashing::{ gather_attester_slashing_indices, verify_attester_slashing, }; +pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; -pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index}; +pub use verify_deposit::{ + get_existing_validator_index, verify_deposit, verify_deposit_index, + verify_deposit_merkle_proof, +}; pub use verify_exit::verify_exit; pub use verify_slashable_attestation::verify_slashable_attestation; -pub use verify_transfer::{execute_transfer, verify_transfer}; +pub use verify_transfer::{execute_transfer, verify_transfer, verify_transfer_partial}; pub mod errors; mod validate_attestation; @@ -426,7 +429,7 @@ pub fn process_exits( .par_iter() .enumerate() .try_for_each(|(i, exit)| { - verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i)) + verify_exit(&state, exit, spec, true).map_err(|e| e.into_with_index(i)) })?; // Update the state in series. 
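Aside (not part of the patch): a minimal sketch of how a consumer of the new `operation_pool` crate might drive the insert/get/prune lifecycle introduced above. The `on_gossip_deposit` and `on_finalized` functions and the surrounding wiring are hypothetical; only the `OperationPool` methods shown in the diff are assumed.

use operation_pool::{DepositInsertStatus, OperationPool};
use types::{BeaconState, ChainSpec, Deposit};

// Hypothetical gossip handler: record a deposit as soon as it is seen.
fn on_gossip_deposit(pool: &mut OperationPool, deposit: Deposit) {
    match pool.insert_deposit(deposit) {
        DepositInsertStatus::Fresh => { /* first deposit seen for this index */ }
        DepositInsertStatus::Duplicate => { /* an identical deposit is already stored */ }
        DepositInsertStatus::Replaced(_old) => { /* a conflicting deposit at this index was evicted */ }
    }
}

// Hypothetical finalization hook: drop operations that can no longer be included in a block.
fn on_finalized(pool: &mut OperationPool, finalized_state: &BeaconState, spec: &ChainSpec) {
    pool.prune_deposits(finalized_state);
    pool.prune_proposer_slashings(finalized_state, spec);
    pool.prune_voluntary_exits(finalized_state, spec);
    pool.prune_transfers(finalized_state);
}

At block-production time, `get_deposits(&state, &spec)` then returns an ordered batch that starts at `state.deposit_index` and is capped at `spec.max_deposits`.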
diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index c0fe252de..6614f6f60 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -390,6 +390,11 @@ pub enum TransferInvalid { /// /// (state_slot, transfer_slot) StateSlotMismatch(Slot, Slot), + /// The `transfer.slot` is in the past relative to the state slot. + /// + /// + /// (state_slot, transfer_slot) + TransferSlotInPast(Slot, Slot), /// The `transfer.from` validator has been activated and is not withdrawable. /// /// (from_validator) diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index a3a0f5734..1b974d972 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -89,7 +89,11 @@ pub fn get_existing_validator_index( /// Verify that a deposit is included in the state's eth1 deposit root. /// /// Spec v0.5.0 -fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool { +pub fn verify_deposit_merkle_proof( + state: &BeaconState, + deposit: &Deposit, + spec: &ChainSpec, +) -> bool { let leaf = hash(&get_serialized_deposit_data(deposit)); verify_merkle_proof( Hash256::from_slice(&leaf), diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index 7893cea96..14dad3442 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -7,11 +7,17 @@ use types::*; /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// +/// The `check_future_epoch` argument determines whether the exit's epoch should be checked +/// against the state's current epoch to ensure it doesn't occur in the future. +/// It should ordinarily be set to true, except for operations stored for +/// some time (such as in the OperationPool). +/// /// Spec v0.5.0 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, spec: &ChainSpec, + check_future_epoch: bool, ) -> Result<(), Error> { let validator = state .validator_registry @@ -32,7 +38,7 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. verify!( - state.current_epoch(spec) >= exit.epoch, + !check_future_epoch || state.current_epoch(spec) >= exit.epoch, Invalid::FutureEpoch { state: state.current_epoch(spec), exit: exit.epoch diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index f873cd850..4f3815797 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -15,6 +15,19 @@ pub fn verify_transfer( state: &BeaconState, transfer: &Transfer, spec: &ChainSpec, +) -> Result<(), Error> { + verify_transfer_partial(state, transfer, spec, false) +} + +/// Parametric version of `verify_transfer` that allows some checks to be skipped. +/// +/// In everywhere except the operation pool, `verify_transfer` should be preferred over this +/// function. 
+pub fn verify_transfer_partial( + state: &BeaconState, + transfer: &Transfer, + spec: &ChainSpec, + for_op_pool: bool, ) -> Result<(), Error> { let sender_balance = *state .validator_balances @@ -27,17 +40,18 @@ pub fn verify_transfer( .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; verify!( - sender_balance >= transfer.amount, + for_op_pool || sender_balance >= transfer.amount, Invalid::FromBalanceInsufficient(transfer.amount, sender_balance) ); verify!( - sender_balance >= transfer.fee, + for_op_pool || sender_balance >= transfer.fee, Invalid::FromBalanceInsufficient(transfer.fee, sender_balance) ); verify!( - (sender_balance == total_amount) + for_op_pool + || (sender_balance == total_amount) || (sender_balance >= (total_amount + spec.min_deposit_amount)), Invalid::InvalidResultingFromBalance( sender_balance - total_amount, @@ -45,10 +59,17 @@ pub fn verify_transfer( ) ); - verify!( - state.slot == transfer.slot, - Invalid::StateSlotMismatch(state.slot, transfer.slot) - ); + if for_op_pool { + verify!( + state.slot <= transfer.slot, + Invalid::TransferSlotInPast(state.slot, transfer.slot) + ); + } else { + verify!( + state.slot == transfer.slot, + Invalid::StateSlotMismatch(state.slot, transfer.slot) + ); + } let sender_validator = state .validator_registry @@ -57,7 +78,8 @@ pub fn verify_transfer( let epoch = state.slot.epoch(spec.slots_per_epoch); verify!( - sender_validator.is_withdrawable_at(epoch) + for_op_pool + || sender_validator.is_withdrawable_at(epoch) || sender_validator.activation_epoch == spec.far_future_epoch, Invalid::FromValidatorIneligableForTransfer(transfer.sender) ); diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index e4ccfd63e..8554b5c54 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" bls = { path = "../utils/bls" } boolean-bitfield = { path = "../utils/boolean-bitfield" } dirs = "1.0" +derivative = "1.0" ethereum-types = "0.5" hashing = { path = "../utils/hashing" } hex = "0.3" diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index 2570d7b3f..4b10ce1ca 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -1,6 +1,7 @@ use super::Slot; use crate::test_utils::TestRandom; use bls::{PublicKey, Signature}; +use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; @@ -12,7 +13,6 @@ use test_random_derive::TestRandom; /// Spec v0.5.0 #[derive( Debug, - PartialEq, Clone, Serialize, Deserialize, @@ -21,7 +21,9 @@ use test_random_derive::TestRandom; TreeHash, TestRandom, SignedRoot, + Derivative, )] +#[derivative(PartialEq, Eq, Hash)] pub struct Transfer { pub sender: u64, pub recipient: u64, @@ -29,6 +31,7 @@ pub struct Transfer { pub fee: u64, pub slot: Slot, pub pubkey: PublicKey, + #[derivative(Hash = "ignore")] pub signature: Signature, } From c2e5d3c45a5adf40c51206e7b04f1199986823ca Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 19 Mar 2019 10:22:14 +1100 Subject: [PATCH 02/33] BLS: fix description of AggregatePublicKey --- eth2/utils/bls/src/aggregate_public_key.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs index 2174a43cb..2e6ee7882 100644 --- a/eth2/utils/bls/src/aggregate_public_key.rs +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -1,7 +1,7 @@ use super::PublicKey; use bls_aggregates::AggregatePublicKey as RawAggregatePublicKey; -/// A single 
BLS signature. +/// A BLS aggregate public key. /// /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ /// serialization). @@ -17,7 +17,7 @@ impl AggregatePublicKey { self.0.add(public_key.as_raw()) } - /// Returns the underlying signature. + /// Returns the underlying public key. pub fn as_raw(&self) -> &RawAggregatePublicKey { &self.0 } From 1fca8a063c0118e84c82b69a8d800d109c65861b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 19 Mar 2019 19:15:33 +1100 Subject: [PATCH 03/33] Operation pool: add attestation support --- eth2/operation_pool/Cargo.toml | 5 +- eth2/operation_pool/src/lib.rs | 128 +++++++++++++++++++++++++++++++-- eth2/types/src/attestation.rs | 19 +++++ 3 files changed, 146 insertions(+), 6 deletions(-) diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index e68d318b5..07cb61864 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -5,5 +5,8 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] +int_to_bytes = { path = "../utils/int_to_bytes" } +itertools = "0.8" types = { path = "../types" } -state_processing = { path = "../../eth2/state_processing" } +state_processing = { path = "../state_processing" } +ssz = { path = "../utils/ssz" } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 7c2525099..7a647450c 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -1,11 +1,15 @@ -use std::collections::{btree_map::Entry, BTreeMap, HashSet}; - +use int_to_bytes::int_to_bytes8; +use itertools::Itertools; +use ssz::ssz_encode; use state_processing::per_block_processing::{ - verify_deposit_merkle_proof, verify_exit, verify_proposer_slashing, verify_transfer, - verify_transfer_partial, + validate_attestation, verify_deposit_merkle_proof, verify_exit, verify_proposer_slashing, + verify_transfer, verify_transfer_partial, }; +use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; +use types::chain_spec::Domain; use types::{ - AttesterSlashing, BeaconState, ChainSpec, Deposit, ProposerSlashing, Transfer, VoluntaryExit, + Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch, + ProposerSlashing, Transfer, VoluntaryExit, }; #[cfg(test)] @@ -15,6 +19,8 @@ const VERIFY_DEPOSIT_PROOFS: bool = true; #[derive(Default)] pub struct OperationPool { + /// Map from attestation ID (see below) to vectors of attestations. + attestations: HashMap>, /// Map from deposit index to deposit data. // NOTE: We assume that there is only one deposit per index // because the Eth1 data is updated (at most) once per epoch, @@ -31,6 +37,54 @@ pub struct OperationPool { transfers: HashSet, } +/// Serialized `AttestationData` augmented with a domain to encode the fork info. +#[derive(PartialEq, Eq, Clone, Hash, Debug)] +struct AttestationId(Vec); + +/// Number of domain bytes that the end of an attestation ID is padded with. 
+const DOMAIN_BYTES_LEN: usize = 8; + +impl AttestationId { + fn from_data(attestation: &AttestationData, state: &BeaconState, spec: &ChainSpec) -> Self { + let mut bytes = ssz_encode(attestation); + let epoch = attestation.slot.epoch(spec.slots_per_epoch); + bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec)); + AttestationId(bytes) + } + + fn compute_domain_bytes(epoch: Epoch, state: &BeaconState, spec: &ChainSpec) -> Vec { + int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork)) + } + + fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool { + &self.0[self.0.len() - DOMAIN_BYTES_LEN..] == domain_bytes + } +} + +/// Compute a fitness score for an attestation. +/// +/// The score is calculated by determining the number of *new* attestations that +/// the aggregate attestation introduces, and is proportional to the size of the reward we will +/// receive for including it in a block. +// TODO: this could be optimised with a map from validator index to whether that validator has +// attested in the *current* epoch. Alternatively, we could cache an index that allows us to +// quickly look up the attestations in the current epoch for a given shard. +fn attestation_score(attestation: &Attestation, state: &BeaconState) -> usize { + // Bitfield of validators whose attestations are new/fresh. + let mut new_validators = attestation.aggregation_bitfield.clone(); + + state + .current_epoch_attestations + .iter() + .filter(|current_attestation| current_attestation.data.shard == attestation.data.shard) + .for_each(|current_attestation| { + // Remove the validators who have signed the existing attestation (they are not new) + new_validators.difference_inplace(¤t_attestation.aggregation_bitfield); + }); + + new_validators.num_set_bits() +} + #[derive(Debug, PartialEq, Clone)] pub enum DepositInsertStatus { /// The deposit was not already in the pool. @@ -47,6 +101,70 @@ impl OperationPool { Self::default() } + /// Insert an attestation into the pool, aggregating it with existing attestations if possible. + pub fn insert_attestation( + &mut self, + attestation: Attestation, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), ()> { + // Check that attestation signatures are valid. + // FIXME: should disable the time-dependent checks. + validate_attestation(state, &attestation, spec).map_err(|_| ())?; + + let id = AttestationId::from_data(&attestation.data, state, spec); + + let existing_attestations = match self.attestations.entry(id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![attestation]); + return Ok(()); + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + let mut aggregated = false; + for existing_attestation in existing_attestations.iter_mut() { + if existing_attestation.signers_disjoint_from(&attestation) { + existing_attestation.aggregate(&attestation); + aggregated = true; + } else if *existing_attestation == attestation { + aggregated = true; + } + } + + if !aggregated { + existing_attestations.push(attestation); + } + + Ok(()) + } + + /// Get a list of attestations for inclusion in a block. + pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + // Attestations for the current fork... + // TODO: should we also check domain bytes for the previous epoch? 
+ let current_epoch = state.slot.epoch(spec.slots_per_epoch); + let domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec); + self.attestations + .iter() + .filter(|(key, _)| key.domain_bytes_match(&domain_bytes)) + .flat_map(|(_, attestations)| attestations) + // That are valid... + .filter(|attestation| validate_attestation(state, attestation, spec).is_ok()) + // Scored by the number of new attestations they introduce (descending) + .map(|att| (att, attestation_score(att, state))) + .sorted_by_key(|&(_, score)| std::cmp::Reverse(score)) + // Limited to the maximum number of attestations per block + .take(spec.max_attestations as usize) + .map(|(att, _)| att) + .cloned() + .collect() + } + + pub fn prune_attestations(&self, _finalized_state: &BeaconState, _spec: &ChainSpec) { + // TODO + } + /// Add a deposit to the pool. /// /// No two distinct deposits should be added with the same index. diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 0b660466e..6c572c852 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -28,6 +28,25 @@ pub struct Attestation { pub aggregate_signature: AggregateSignature, } +impl Attestation { + /// Are the aggregation bitfields of these attestations disjoint? + pub fn signers_disjoint_from(&self, other: &Attestation) -> bool { + self.aggregation_bitfield.intersection(&other.aggregation_bitfield).is_zero() + } + + /// Aggregate another Attestation into this one. + /// + /// The aggregation bitfields must be disjoint, and the data must be the same. + pub fn aggregate(&mut self, other: &Attestation) { + debug_assert_eq!(self.data, other.data); + debug_assert!(self.signers_disjoint_from(other)); + + self.aggregation_bitfield.union_inplace(&other.aggregation_bitfield); + self.custody_bitfield.union_inplace(&other.custody_bitfield); + // FIXME: signature aggregation once our BLS library wraps it + } +} + #[cfg(test)] mod tests { use super::*; From 18a7bd243c1d5fe865ef954d3fc799f6d5b9cab7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 19 Mar 2019 19:18:33 +1100 Subject: [PATCH 04/33] Bitfield: implement union/intersection/difference --- eth2/utils/boolean-bitfield/src/lib.rs | 92 ++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index 443cd06da..e37f2488d 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -89,6 +89,11 @@ impl BooleanBitfield { self.len() == 0 } + /// Returns true if all bits are set to 0. + pub fn is_zero(&self) -> bool { + self.0.none() + } + /// Returns the number of bytes required to represent this bitfield. pub fn num_bytes(&self) -> usize { self.to_bytes().len() @@ -104,6 +109,44 @@ impl BooleanBitfield { pub fn to_bytes(&self) -> Vec { self.0.to_bytes() } + + /// Compute the intersection (binary-and) of this bitfield with another. Lengths must match. + pub fn intersection(&self, other: &Self) -> Self { + let mut res = self.clone(); + res.intersection_inplace(other); + res + } + + /// Like `intersection` but in-place (updates `self`). + pub fn intersection_inplace(&mut self, other: &Self) { + self.0.intersect(&other.0); + } + + /// Compute the union (binary-or) of this bitfield with another. Lengths must match. + pub fn union(&self, other: &Self) -> Self { + let mut res = self.clone(); + res.union_inplace(other); + res + } + + /// Like `union` but in-place (updates `self`). 
+ pub fn union_inplace(&mut self, other: &Self) { + self.0.union(&other.0); + } + + /// Compute the difference (binary-minus) of this bitfield with another. Lengths must match. + /// + /// Computes `self - other`. + pub fn difference(&self, other: &Self) -> Self { + let mut res = self.clone(); + res.difference_inplace(other); + res + } + + /// Like `difference` but in-place (updates `self`). + pub fn difference_inplace(&mut self, other: &Self) { + self.0.difference(&other.0); + } } impl default::Default for BooleanBitfield { @@ -427,4 +470,53 @@ mod tests { let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); assert_eq!(c, a & b); } + + #[test] + fn test_is_zero() { + let yes_data: &[&[u8]] = &[&[], &[0], &[0, 0], &[0, 0, 0]]; + for bytes in yes_data { + assert!(BooleanBitfield::from_bytes(bytes).is_zero()); + } + let no_data: &[&[u8]] = &[&[1], &[6], &[0, 1], &[0, 0, 1], &[0, 0, 255]]; + for bytes in no_data { + assert!(!BooleanBitfield::from_bytes(bytes).is_zero()); + } + } + + #[test] + fn test_intersection() { + let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); + let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); + let c = BooleanBitfield::from_bytes(&[0b1000, 0b0001]); + assert_eq!(a.intersection(&b), c); + assert_eq!(b.intersection(&a), c); + assert_eq!(a.intersection(&c), c); + assert_eq!(b.intersection(&c), c); + assert_eq!(a.intersection(&a), a); + assert_eq!(b.intersection(&b), b); + assert_eq!(c.intersection(&c), c); + } + + #[test] + fn test_union() { + let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); + let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); + let c = BooleanBitfield::from_bytes(&[0b1111, 0b1001]); + assert_eq!(a.union(&b), c); + assert_eq!(b.union(&a), c); + assert_eq!(a.union(&a), a); + assert_eq!(b.union(&b), b); + assert_eq!(c.union(&c), c); + } + + #[test] + fn test_difference() { + let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); + let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); + let a_b = BooleanBitfield::from_bytes(&[0b0100, 0b0000]); + let b_a = BooleanBitfield::from_bytes(&[0b0011, 0b1000]); + assert_eq!(a.difference(&b), a_b); + assert_eq!(b.difference(&a), b_a); + assert!(a.difference(&a).is_zero()); + } } From 8a7c51271ed06e75fc0f45894a54ab489b2561bc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 19 Mar 2019 19:19:21 +1100 Subject: [PATCH 05/33] Bitfield: use BitOr instead of BitAnd for union Closes #314 --- eth2/utils/boolean-bitfield/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index e37f2488d..cdd0bc3d7 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -168,10 +168,11 @@ impl cmp::PartialEq for BooleanBitfield { /// Create a new bitfield that is a union of two other bitfields. 
/// /// For example `union(0101, 1000) == 1101` -impl std::ops::BitAnd for BooleanBitfield { +// TODO: length-independent intersection for BitAnd +impl std::ops::BitOr for BooleanBitfield { type Output = Self; - fn bitand(self, other: Self) -> Self { + fn bitor(self, other: Self) -> Self { let (biggest, smallest) = if self.len() > other.len() { (&self, &other) } else { @@ -464,11 +465,11 @@ mod tests { } #[test] - fn test_bitand() { + fn test_bitor() { let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); - assert_eq!(c, a & b); + assert_eq!(c, a | b); } #[test] From a8224aa4ec05fe467abaf21d7b1a6f1a9239f584 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 10:14:31 +1100 Subject: [PATCH 06/33] Operation pool: add prune_all --- eth2/operation_pool/src/lib.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 7a647450c..93ff74652 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -351,6 +351,16 @@ impl OperationPool { .filter(|transfer| transfer.slot > finalized_state.slot) .collect(); } + + /// Prune all types of transactions given the latest finalized state. + pub fn prune_all(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + self.prune_attestations(finalized_state, spec); + self.prune_deposits(finalized_state); + self.prune_proposer_slashings(finalized_state, spec); + // FIXME: add attester slashings + self.prune_voluntary_exits(finalized_state, spec); + self.prune_transfers(finalized_state); + } } /// Filter up to a maximum number of operations out of a slice. From 9c2dfba843c2210df161537f186eb87cb3c18bda Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 10:47:19 +1100 Subject: [PATCH 07/33] Operation pool: prune attestations --- eth2/operation_pool/src/lib.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 93ff74652..4932a7e0a 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -161,8 +161,18 @@ impl OperationPool { .collect() } - pub fn prune_attestations(&self, _finalized_state: &BeaconState, _spec: &ChainSpec) { - // TODO + /// Remove attestations which are too old to be included in a block. + // TODO: we could probably prune other attestations here: + // - ones that are completely covered by attestations included in the state + // - maybe ones invalidated by the confirmation of one fork over another + pub fn prune_attestations(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + self.attestations.retain(|_, attestations| { + // All the attestations in this bucket have the same data, so we only need to + // check the first one. + attestations.first().map_or(false, |att| { + finalized_state.slot < att.data.slot + spec.slots_per_epoch + }) + }); } /// Add a deposit to the pool. 
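Aside (not part of the patch series): a toy, self-contained sketch of how the bitfield operations added above support attestation handling. The score of an incoming aggregate is the number of attesters it adds on top of what is already known, and aggregation is only attempted when the signer sets are disjoint, mirroring `attestation_score` and `signers_disjoint_from` in the earlier diffs. The byte values are arbitrary examples; only methods used or added in these patches are called.

use boolean_bitfield::BooleanBitfield;

fn main() {
    // Attesters already covered by aggregates known to the state (4 bits set).
    let existing = BooleanBitfield::from_bytes(&[0b0000_1111]);
    // Attesters covered by an incoming aggregate (4 bits set, 2 of them overlapping).
    let incoming = BooleanBitfield::from_bytes(&[0b0011_1100]);

    // "Score": attesters the incoming aggregate contributes beyond what we already have.
    let fresh = incoming.difference(&existing);
    assert_eq!(fresh.num_set_bits(), 2);

    // Aggregating two attestations is only valid when their signer sets are disjoint.
    let disjoint = incoming.intersection(&existing).is_zero();
    assert!(!disjoint);
}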
From 05dd936a97fbcc837e74ba35fc6e5972f7cadac2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 12:44:37 +1100 Subject: [PATCH 08/33] Operation pool: deposit pruning tests --- eth2/operation_pool/src/lib.rs | 91 ++++++++++++++++++++++++++++------ 1 file changed, 77 insertions(+), 14 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 4932a7e0a..938a3db0c 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -1,6 +1,7 @@ use int_to_bytes::int_to_bytes8; use itertools::Itertools; use ssz::ssz_encode; +use state_processing::per_block_processing::errors::ProposerSlashingValidationError; use state_processing::per_block_processing::{ validate_attestation, verify_deposit_merkle_proof, verify_exit, verify_proposer_slashing, verify_transfer, verify_transfer_partial, @@ -92,7 +93,7 @@ pub enum DepositInsertStatus { /// The deposit already existed in the pool. Duplicate, /// The deposit conflicted with an existing deposit, which was replaced. - Replaced(Deposit), + Replaced(Box), } impl OperationPool { @@ -190,7 +191,7 @@ impl OperationPool { if entry.get() == &deposit { Duplicate } else { - Replaced(entry.insert(deposit)) + Replaced(Box::new(entry.insert(deposit))) } } } @@ -222,17 +223,21 @@ impl OperationPool { std::mem::replace(&mut self.deposits, deposits_keep) } + /// The number of deposits stored in the pool. + pub fn num_deposits(&self) -> usize { + self.deposits.len() + } + /// Insert a proposer slashing into the pool. pub fn insert_proposer_slashing( &mut self, slashing: ProposerSlashing, state: &BeaconState, spec: &ChainSpec, - ) -> Result<(), ()> { + ) -> Result<(), ProposerSlashingValidationError> { // TODO: should maybe insert anyway if the proposer is unknown in the validator index, // because they could *become* known later - // FIXME: error handling - verify_proposer_slashing(&slashing, state, spec).map_err(|_| ())?; + verify_proposer_slashing(&slashing, state, spec)?; self.proposer_slashings .insert(slashing.proposer_index, slashing); Ok(()) @@ -404,7 +409,22 @@ mod tests { assert_eq!(op_pool.insert_deposit(deposit1.clone()), Fresh); assert_eq!(op_pool.insert_deposit(deposit1.clone()), Duplicate); - assert_eq!(op_pool.insert_deposit(deposit2), Replaced(deposit1)); + assert_eq!( + op_pool.insert_deposit(deposit2), + Replaced(Box::new(deposit1)) + ); + } + + // Create `count` dummy deposits with sequential deposit IDs beginning from `start`. 
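+ // The generated deposits share identical random data; only the index differs.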
+ fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec { + let proto_deposit = Deposit::random_for_test(rng); + (start..start + count) + .map(|index| { + let mut deposit = proto_deposit.clone(); + deposit.index = index; + deposit + }) + .collect() } #[test] @@ -418,14 +438,7 @@ mod tests { let offset = 1; assert!(offset <= extra); - let proto_deposit = Deposit::random_for_test(&mut rng); - let deposits = (start..start + max_deposits + extra) - .map(|index| { - let mut deposit = proto_deposit.clone(); - deposit.index = index; - deposit - }) - .collect::>(); + let deposits = dummy_deposits(&mut rng, start, max_deposits + extra); for deposit in &deposits { assert_eq!(op_pool.insert_deposit(deposit.clone()), Fresh); @@ -442,5 +455,55 @@ mod tests { ); } + #[test] + fn prune_deposits() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let mut op_pool = OperationPool::new(); + let spec = ChainSpec::foundation(); + + let start1 = 100; + let count = 100; + let gap = 25; + let start2 = start1 + count + gap; + + let deposits1 = dummy_deposits(rng, start1, count); + let deposits2 = dummy_deposits(rng, start2, count); + + for d in deposits1.into_iter().chain(deposits2) { + op_pool.insert_deposit(d); + } + + assert_eq!(op_pool.num_deposits(), 2 * count as usize); + + let mut state = BeaconState::random_for_test(rng); + state.deposit_index = start1; + + // Pruning the first bunch of deposits in batches of 5 should work. + let step = 5; + let mut pool_size = step + 2 * count as usize; + for i in (start1..=(start1 + count)).step_by(step) { + state.deposit_index = i; + op_pool.prune_deposits(&state); + pool_size -= step; + assert_eq!(op_pool.num_deposits(), pool_size); + } + assert_eq!(pool_size, count as usize); + // Pruning in the gap should do nothing. + for i in (start1 + count..start2).step_by(step) { + state.deposit_index = i; + op_pool.prune_deposits(&state); + assert_eq!(op_pool.num_deposits(), count as usize); + } + // Same again for the later deposits. + pool_size += step; + for i in (start2..=(start2 + count)).step_by(step) { + state.deposit_index = i; + op_pool.prune_deposits(&state); + pool_size -= step; + assert_eq!(op_pool.num_deposits(), pool_size); + } + assert_eq!(op_pool.num_deposits(), 0); + } + // TODO: more tests } From 03c01c8a8db8313e8c7de6c6edb41c69eaace4ee Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 13:06:06 +1100 Subject: [PATCH 09/33] Operation pool: HashMap instead of BTreeMap --- eth2/operation_pool/src/lib.rs | 75 +++++++++++++++++----------------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 938a3db0c..c625cdc83 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -10,7 +10,7 @@ use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use types::chain_spec::Domain; use types::{ Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch, - ProposerSlashing, Transfer, VoluntaryExit, + ProposerSlashing, Transfer, Validator, VoluntaryExit, }; #[cfg(test)] @@ -29,11 +29,11 @@ pub struct OperationPool { // longer than an epoch deposits: BTreeMap, /// Map from attester index to slashing. - attester_slashings: BTreeMap, + attester_slashings: HashMap, /// Map from proposer index to slashing. - proposer_slashings: BTreeMap, + proposer_slashings: HashMap, /// Map from exiting validator to their exit data. 
- voluntary_exits: BTreeMap, + voluntary_exits: HashMap, /// Set of transfers. transfers: HashSet, } @@ -268,24 +268,14 @@ impl OperationPool { /// Prune slashings for all slashed or withdrawn validators. pub fn prune_proposer_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { - let to_prune = self - .proposer_slashings - .keys() - .flat_map(|&validator_index| { - finalized_state - .validator_registry - .get(validator_index as usize) - .filter(|validator| { - validator.slashed - || validator.is_withdrawable_at(finalized_state.current_epoch(spec)) - }) - .map(|_| validator_index) - }) - .collect::>(); - - for validator_index in to_prune { - self.proposer_slashings.remove(&validator_index); - } + prune_validator_hash_map( + &mut self.proposer_slashings, + |validator| { + validator.slashed + || validator.is_withdrawable_at(finalized_state.current_epoch(spec)) + }, + finalized_state, + ); } // TODO: copy ProposerSlashing code for AttesterSlashing @@ -314,21 +304,11 @@ impl OperationPool { /// Prune if validator has already exited at the last finalized state. pub fn prune_voluntary_exits(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { - let to_prune = self - .voluntary_exits - .keys() - .flat_map(|&validator_index| { - finalized_state - .validator_registry - .get(validator_index as usize) - .filter(|validator| validator.is_exited_at(finalized_state.current_epoch(spec))) - .map(|_| validator_index) - }) - .collect::>(); - - for validator_index in to_prune { - self.voluntary_exits.remove(&validator_index); - } + prune_validator_hash_map( + &mut self.voluntary_exits, + |validator| validator.is_exited_at(finalized_state.current_epoch(spec)), + finalized_state, + ); } /// Insert a transfer into the pool, checking it for validity in the process. @@ -393,6 +373,26 @@ where .collect() } +/// Remove all entries from the given hash map for which `prune_if` returns true. +/// +/// The keys in the map should be validator indices, which will be looked up +/// in the state's validator registry and then passed to `prune_if`. +/// Entries for unknown validators will be kept. 
+fn prune_validator_hash_map( + map: &mut HashMap, + prune_if: F, + finalized_state: &BeaconState, +) where + F: Fn(&Validator) -> bool, +{ + map.retain(|&validator_index, _| { + finalized_state + .validator_registry + .get(validator_index as usize) + .map_or(true, |validator| !prune_if(validator)) + }); +} + #[cfg(test)] mod tests { use super::DepositInsertStatus::*; @@ -459,7 +459,6 @@ mod tests { fn prune_deposits() { let rng = &mut XorShiftRng::from_seed([42; 16]); let mut op_pool = OperationPool::new(); - let spec = ChainSpec::foundation(); let start1 = 100; let count = 100; From b2fe14e12c23304241a6e57252ced39920f45c5c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 15:57:41 +1100 Subject: [PATCH 10/33] Operation pool: refactor verify_deposit/exit --- eth2/operation_pool/src/lib.rs | 134 ++++++++++++------ .../src/per_block_processing.rs | 5 +- .../per_block_processing/verify_deposit.rs | 6 +- .../src/per_block_processing/verify_exit.rs | 27 +++- eth2/types/src/attestation.rs | 7 +- 5 files changed, 118 insertions(+), 61 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index c625cdc83..ecf4e41e7 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -3,8 +3,8 @@ use itertools::Itertools; use ssz::ssz_encode; use state_processing::per_block_processing::errors::ProposerSlashingValidationError; use state_processing::per_block_processing::{ - validate_attestation, verify_deposit_merkle_proof, verify_exit, verify_proposer_slashing, - verify_transfer, verify_transfer_partial, + validate_attestation, verify_deposit, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_partial, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use types::chain_spec::Domain; @@ -179,19 +179,27 @@ impl OperationPool { /// Add a deposit to the pool. /// /// No two distinct deposits should be added with the same index. 
- pub fn insert_deposit(&mut self, deposit: Deposit) -> DepositInsertStatus { + pub fn insert_deposit( + &mut self, + deposit: Deposit, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { use DepositInsertStatus::*; match self.deposits.entry(deposit.index) { Entry::Vacant(entry) => { + // FIXME: error prop + verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec).map_err(|_| ())?; entry.insert(deposit); - Fresh + Ok(Fresh) } Entry::Occupied(mut entry) => { if entry.get() == &deposit { - Duplicate + Ok(Duplicate) } else { - Replaced(Box::new(entry.insert(deposit))) + verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec).map_err(|_| ())?; + Ok(Replaced(Box::new(entry.insert(deposit)))) } } } @@ -204,14 +212,7 @@ impl OperationPool { let start_idx = state.deposit_index; (start_idx..start_idx + spec.max_deposits) .map(|idx| self.deposits.get(&idx)) - .take_while(|deposit| { - // NOTE: we don't use verify_deposit, because it requires the - // deposit's index to match the state's, and we would like to return - // a batch with increasing indices - deposit.map_or(false, |deposit| { - !VERIFY_DEPOSIT_PROOFS || verify_deposit_merkle_proof(state, deposit, spec) - }) - }) + .take_while(Option::is_some) .flatten() .cloned() .collect() @@ -287,7 +288,7 @@ impl OperationPool { state: &BeaconState, spec: &ChainSpec, ) -> Result<(), ()> { - verify_exit(state, &exit, spec, false).map_err(|_| ())?; + verify_exit_time_independent_only(state, &exit, spec).map_err(|_| ())?; self.voluntary_exits.insert(exit.validator_index, exit); Ok(()) } @@ -297,7 +298,7 @@ impl OperationPool { pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { filter_limit_operations( self.voluntary_exits.values(), - |exit| verify_exit(state, exit, spec, true).is_ok(), + |exit| verify_exit(state, exit, spec).is_ok(), spec.max_voluntary_exits, ) } @@ -398,53 +399,51 @@ mod tests { use super::DepositInsertStatus::*; use super::*; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use types::*; #[test] fn insert_deposit() { - let mut rng = XorShiftRng::from_seed([42; 16]); + let rng = &mut XorShiftRng::from_seed([42; 16]); + let (ref spec, ref state) = test_state(rng); let mut op_pool = OperationPool::new(); - let deposit1 = Deposit::random_for_test(&mut rng); - let mut deposit2 = Deposit::random_for_test(&mut rng); + let deposit1 = make_deposit(rng, state, spec); + let mut deposit2 = make_deposit(rng, state, spec); deposit2.index = deposit1.index; - assert_eq!(op_pool.insert_deposit(deposit1.clone()), Fresh); - assert_eq!(op_pool.insert_deposit(deposit1.clone()), Duplicate); assert_eq!( - op_pool.insert_deposit(deposit2), - Replaced(Box::new(deposit1)) + op_pool.insert_deposit(deposit1.clone(), state, spec), + Ok(Fresh) + ); + assert_eq!( + op_pool.insert_deposit(deposit1.clone(), state, spec), + Ok(Duplicate) + ); + assert_eq!( + op_pool.insert_deposit(deposit2, state, spec), + Ok(Replaced(Box::new(deposit1))) ); - } - - // Create `count` dummy deposits with sequential deposit IDs beginning from `start`. 
- fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec { - let proto_deposit = Deposit::random_for_test(rng); - (start..start + count) - .map(|index| { - let mut deposit = proto_deposit.clone(); - deposit.index = index; - deposit - }) - .collect() } #[test] fn get_deposits_max() { - let mut rng = XorShiftRng::from_seed([42; 16]); + let rng = &mut XorShiftRng::from_seed([42; 16]); + let (spec, mut state) = test_state(rng); let mut op_pool = OperationPool::new(); - let spec = ChainSpec::foundation(); let start = 10000; let max_deposits = spec.max_deposits; let extra = 5; let offset = 1; assert!(offset <= extra); - let deposits = dummy_deposits(&mut rng, start, max_deposits + extra); + let deposits = dummy_deposits(rng, &state, &spec, start, max_deposits + extra); for deposit in &deposits { - assert_eq!(op_pool.insert_deposit(deposit.clone()), Fresh); + assert_eq!( + op_pool.insert_deposit(deposit.clone(), &state, &spec), + Ok(Fresh) + ); } - let mut state = BeaconState::random_for_test(&mut rng); state.deposit_index = start + offset; let deposits_for_block = op_pool.get_deposits(&state, &spec); @@ -458,18 +457,20 @@ mod tests { #[test] fn prune_deposits() { let rng = &mut XorShiftRng::from_seed([42; 16]); + let (spec, state) = test_state(rng); let mut op_pool = OperationPool::new(); let start1 = 100; - let count = 100; + // test is super slow in debug mode if this parameter is too high + let count = 5; let gap = 25; let start2 = start1 + count + gap; - let deposits1 = dummy_deposits(rng, start1, count); - let deposits2 = dummy_deposits(rng, start2, count); + let deposits1 = dummy_deposits(rng, &state, &spec, start1, count); + let deposits2 = dummy_deposits(rng, &state, &spec, start2, count); for d in deposits1.into_iter().chain(deposits2) { - op_pool.insert_deposit(d); + assert!(op_pool.insert_deposit(d, &state, &spec).is_ok()); } assert_eq!(op_pool.num_deposits(), 2 * count as usize); @@ -504,5 +505,50 @@ mod tests { assert_eq!(op_pool.num_deposits(), 0); } + // Create a random deposit (with a valid proof of posession) + fn make_deposit(rng: &mut XorShiftRng, state: &BeaconState, spec: &ChainSpec) -> Deposit { + let keypair = Keypair::random(); + let mut deposit = Deposit::random_for_test(rng); + let mut deposit_input = DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), + proof_of_possession: Signature::empty_signature(), + }; + deposit_input.proof_of_possession = deposit_input.create_proof_of_possession( + &keypair.sk, + state.slot.epoch(spec.slots_per_epoch), + &state.fork, + spec, + ); + deposit.deposit_data.deposit_input = deposit_input; + deposit + } + + // Create `count` dummy deposits with sequential deposit IDs beginning from `start`. 
+ fn dummy_deposits( + rng: &mut XorShiftRng, + state: &BeaconState, + spec: &ChainSpec, + start: u64, + count: u64, + ) -> Vec { + let proto_deposit = make_deposit(rng, state, spec); + (start..start + count) + .map(|index| { + let mut deposit = proto_deposit.clone(); + deposit.index = index; + deposit + }) + .collect() + } + + fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) { + let spec = ChainSpec::foundation(); + let mut state = BeaconState::random_for_test(rng); + state.fork = Fork::genesis(&spec); + + (spec, state) + } + // TODO: more tests } diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index e0e359552..617da00d4 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -11,9 +11,8 @@ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; pub use verify_deposit::{ get_existing_validator_index, verify_deposit, verify_deposit_index, - verify_deposit_merkle_proof, }; -pub use verify_exit::verify_exit; +pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; pub use verify_slashable_attestation::verify_slashable_attestation; pub use verify_transfer::{execute_transfer, verify_transfer, verify_transfer_partial}; @@ -429,7 +428,7 @@ pub fn process_exits( .par_iter() .enumerate() .try_for_each(|(i, exit)| { - verify_exit(&state, exit, spec, true).map_err(|e| e.into_with_index(i)) + verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i)) })?; // Update the state in series. diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 1b974d972..a3a0f5734 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -89,11 +89,7 @@ pub fn get_existing_validator_index( /// Verify that a deposit is included in the state's eth1 deposit root. /// /// Spec v0.5.0 -pub fn verify_deposit_merkle_proof( - state: &BeaconState, - deposit: &Deposit, - spec: &ChainSpec, -) -> bool { +fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool { let leaf = hash(&get_serialized_deposit_data(deposit)); verify_merkle_proof( Hash256::from_slice(&leaf), diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index 14dad3442..a3b694395 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -7,17 +7,30 @@ use types::*; /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// -/// The `check_future_epoch` argument determines whether the exit's epoch should be checked -/// against the state's current epoch to ensure it doesn't occur in the future. -/// It should ordinarily be set to true, except for operations stored for -/// some time (such as in the OperationPool). -/// /// Spec v0.5.0 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, spec: &ChainSpec, - check_future_epoch: bool, +) -> Result<(), Error> { + verify_exit_parametric(state, exit, spec, false) +} + +/// Like `verify_exit` but doesn't run checks which may become true in future states. 
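+/// This is the variant the operation pool uses when storing exits for possible later inclusion.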
+pub fn verify_exit_time_independent_only( + state: &BeaconState, + exit: &VoluntaryExit, + spec: &ChainSpec, +) -> Result<(), Error> { + verify_exit_parametric(state, exit, spec, true) +} + +/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true. +fn verify_exit_parametric( + state: &BeaconState, + exit: &VoluntaryExit, + spec: &ChainSpec, + time_independent_only: bool, ) -> Result<(), Error> { let validator = state .validator_registry @@ -38,7 +51,7 @@ pub fn verify_exit( // Exits must specify an epoch when they become valid; they are not valid before then. verify!( - !check_future_epoch || state.current_epoch(spec) >= exit.epoch, + time_independent_only || state.current_epoch(spec) >= exit.epoch, Invalid::FutureEpoch { state: state.current_epoch(spec), exit: exit.epoch diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 6c572c852..043711015 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -31,7 +31,9 @@ pub struct Attestation { impl Attestation { /// Are the aggregation bitfields of these attestations disjoint? pub fn signers_disjoint_from(&self, other: &Attestation) -> bool { - self.aggregation_bitfield.intersection(&other.aggregation_bitfield).is_zero() + self.aggregation_bitfield + .intersection(&other.aggregation_bitfield) + .is_zero() } /// Aggregate another Attestation into this one. @@ -41,7 +43,8 @@ impl Attestation { debug_assert_eq!(self.data, other.data); debug_assert!(self.signers_disjoint_from(other)); - self.aggregation_bitfield.union_inplace(&other.aggregation_bitfield); + self.aggregation_bitfield + .union_inplace(&other.aggregation_bitfield); self.custody_bitfield.union_inplace(&other.custody_bitfield); // FIXME: signature aggregation once our BLS library wraps it } From 95ed40222827c5aba0259ce4a128ab51af54631b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 16:13:06 +1100 Subject: [PATCH 11/33] op-pool: rename to verify_*_time_independent_only --- eth2/operation_pool/src/lib.rs | 4 +-- .../src/per_block_processing.rs | 8 ++--- .../per_block_processing/verify_transfer.rs | 30 +++++++++++-------- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index ecf4e41e7..ff5a7416b 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -4,7 +4,7 @@ use ssz::ssz_encode; use state_processing::per_block_processing::errors::ProposerSlashingValidationError; use state_processing::per_block_processing::{ validate_attestation, verify_deposit, verify_exit, verify_exit_time_independent_only, - verify_proposer_slashing, verify_transfer, verify_transfer_partial, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use types::chain_spec::Domain; @@ -322,7 +322,7 @@ impl OperationPool { // The signature of the transfer isn't hashed, but because we check // it before we insert into the HashSet, we can't end up with duplicate // transactions. 
- verify_transfer_partial(state, &transfer, spec, true).map_err(|_| ())?; + verify_transfer_time_independent_only(state, &transfer, spec).map_err(|_| ())?; self.transfers.insert(transfer); Ok(()) } diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 617da00d4..55e2c29d0 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -9,12 +9,12 @@ pub use self::verify_attester_slashing::{ }; pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; -pub use verify_deposit::{ - get_existing_validator_index, verify_deposit, verify_deposit_index, -}; +pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index}; pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; pub use verify_slashable_attestation::verify_slashable_attestation; -pub use verify_transfer::{execute_transfer, verify_transfer, verify_transfer_partial}; +pub use verify_transfer::{ + execute_transfer, verify_transfer, verify_transfer_time_independent_only, +}; pub mod errors; mod validate_attestation; diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 4f3815797..ac9e9aa09 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -16,18 +16,24 @@ pub fn verify_transfer( transfer: &Transfer, spec: &ChainSpec, ) -> Result<(), Error> { - verify_transfer_partial(state, transfer, spec, false) + verify_transfer_parametric(state, transfer, spec, false) } -/// Parametric version of `verify_transfer` that allows some checks to be skipped. -/// -/// In everywhere except the operation pool, `verify_transfer` should be preferred over this -/// function. -pub fn verify_transfer_partial( +/// Like `verify_transfer` but doesn't run checks which may become true in future states. +pub fn verify_transfer_time_independent_only( state: &BeaconState, transfer: &Transfer, spec: &ChainSpec, - for_op_pool: bool, +) -> Result<(), Error> { + verify_transfer_parametric(state, transfer, spec, true) +} + +/// Parametric version of `verify_transfer` that allows some checks to be skipped. 
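+///
+/// When `time_independent_only` is true, the sender-balance checks are relaxed and the
+/// transfer's slot is only required not to lie in the past, so a transfer destined for a
+/// future slot can sit in the operation pool and be re-verified with `verify_transfer`
+/// when a block is produced.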
+fn verify_transfer_parametric( + state: &BeaconState, + transfer: &Transfer, + spec: &ChainSpec, + time_independent_only: bool, ) -> Result<(), Error> { let sender_balance = *state .validator_balances @@ -40,17 +46,17 @@ pub fn verify_transfer_partial( .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; verify!( - for_op_pool || sender_balance >= transfer.amount, + time_independent_only || sender_balance >= transfer.amount, Invalid::FromBalanceInsufficient(transfer.amount, sender_balance) ); verify!( - for_op_pool || sender_balance >= transfer.fee, + time_independent_only || sender_balance >= transfer.fee, Invalid::FromBalanceInsufficient(transfer.fee, sender_balance) ); verify!( - for_op_pool + time_independent_only || (sender_balance == total_amount) || (sender_balance >= (total_amount + spec.min_deposit_amount)), Invalid::InvalidResultingFromBalance( @@ -59,7 +65,7 @@ pub fn verify_transfer_partial( ) ); - if for_op_pool { + if time_independent_only { verify!( state.slot <= transfer.slot, Invalid::TransferSlotInPast(state.slot, transfer.slot) @@ -78,7 +84,7 @@ pub fn verify_transfer_partial( let epoch = state.slot.epoch(spec.slots_per_epoch); verify!( - for_op_pool + time_independent_only || sender_validator.is_withdrawable_at(epoch) || sender_validator.activation_epoch == spec.far_future_epoch, Invalid::FromValidatorIneligableForTransfer(transfer.sender) From 3396f2f08e05d82485b2317db04bce54831f29da Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 16:28:04 +1100 Subject: [PATCH 12/33] op-pool: propagate errors, sort by transfer fee --- eth2/operation_pool/src/lib.rs | 46 ++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index ff5a7416b..c6af777e6 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -1,7 +1,10 @@ use int_to_bytes::int_to_bytes8; use itertools::Itertools; use ssz::ssz_encode; -use state_processing::per_block_processing::errors::ProposerSlashingValidationError; +use state_processing::per_block_processing::errors::{ + AttestationValidationError, DepositValidationError, ExitValidationError, + ProposerSlashingValidationError, TransferValidationError, +}; use state_processing::per_block_processing::{ validate_attestation, verify_deposit, verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, @@ -108,10 +111,10 @@ impl OperationPool { attestation: Attestation, state: &BeaconState, spec: &ChainSpec, - ) -> Result<(), ()> { + ) -> Result<(), AttestationValidationError> { // Check that attestation signatures are valid. // FIXME: should disable the time-dependent checks. - validate_attestation(state, &attestation, spec).map_err(|_| ())?; + validate_attestation(state, &attestation, spec)?; let id = AttestationId::from_data(&attestation.data, state, spec); @@ -143,7 +146,7 @@ impl OperationPool { /// Get a list of attestations for inclusion in a block. pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { // Attestations for the current fork... - // TODO: should we also check domain bytes for the previous epoch? + // FIXME: should we also check domain bytes for the previous epoch? 
let current_epoch = state.slot.epoch(spec.slots_per_epoch); let domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec); self.attestations @@ -184,13 +187,12 @@ impl OperationPool { deposit: Deposit, state: &BeaconState, spec: &ChainSpec, - ) -> Result { + ) -> Result { use DepositInsertStatus::*; match self.deposits.entry(deposit.index) { Entry::Vacant(entry) => { - // FIXME: error prop - verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec).map_err(|_| ())?; + verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?; entry.insert(deposit); Ok(Fresh) } @@ -198,7 +200,7 @@ impl OperationPool { if entry.get() == &deposit { Ok(Duplicate) } else { - verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec).map_err(|_| ())?; + verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?; Ok(Replaced(Box::new(entry.insert(deposit)))) } } @@ -279,7 +281,7 @@ impl OperationPool { ); } - // TODO: copy ProposerSlashing code for AttesterSlashing + // FIXME: copy ProposerSlashing code for AttesterSlashing /// Insert a voluntary exit, validating it almost-entirely (future exits are permitted). pub fn insert_voluntary_exit( @@ -287,14 +289,13 @@ impl OperationPool { exit: VoluntaryExit, state: &BeaconState, spec: &ChainSpec, - ) -> Result<(), ()> { - verify_exit_time_independent_only(state, &exit, spec).map_err(|_| ())?; + ) -> Result<(), ExitValidationError> { + verify_exit_time_independent_only(state, &exit, spec)?; self.voluntary_exits.insert(exit.validator_index, exit); Ok(()) } /// Get a list of voluntary exits for inclusion in a block. - // TODO: could optimise this by eliding the checks that have already been done on insert pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { filter_limit_operations( self.voluntary_exits.values(), @@ -318,25 +319,26 @@ impl OperationPool { transfer: Transfer, state: &BeaconState, spec: &ChainSpec, - ) -> Result<(), ()> { + ) -> Result<(), TransferValidationError> { // The signature of the transfer isn't hashed, but because we check // it before we insert into the HashSet, we can't end up with duplicate // transactions. - verify_transfer_time_independent_only(state, &transfer, spec).map_err(|_| ())?; + verify_transfer_time_independent_only(state, &transfer, spec)?; self.transfers.insert(transfer); Ok(()) } /// Get a list of transfers for inclusion in a block. - // TODO: improve the economic optimality of this function by taking the transfer - // fees into account, and dependencies between transfers in the same block - // e.g. A pays B, B pays C + // TODO: improve the economic optimality of this function by accounting for + // dependencies between transfers in the same block e.g. A pays B, B pays C pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { - filter_limit_operations( - &self.transfers, - |transfer| verify_transfer(state, transfer, spec).is_ok(), - spec.max_transfers, - ) + self.transfers + .iter() + .filter(|transfer| verify_transfer(state, transfer, spec).is_ok()) + .sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee)) + .take(spec.max_transfers as usize) + .cloned() + .collect() } /// Prune the set of transfers by removing all those whose slot has already passed. 
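
The fee ordering in `get_transfers` above comes from `Itertools::sorted_by_key` combined with
`std::cmp::Reverse`, which yields transfers in descending fee order before truncating to
`spec.max_transfers`. A minimal, self-contained sketch of that selection logic, with plain
`u64` fees standing in for full `Transfer` values:

    // Sketch of the fee-prioritised selection used by `get_transfers`.
    use itertools::Itertools;

    fn main() {
        let fees = vec![1u64, 7, 3, 9, 2];
        let max_transfers = 3;
        let selected: Vec<u64> = fees
            .into_iter()
            .sorted_by_key(|fee| std::cmp::Reverse(*fee))
            .take(max_transfers)
            .collect();
        // The highest-fee transfers are taken first.
        assert_eq!(selected, vec![9, 7, 3]);
    }
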
From e512f7c0e1cc0d836e1aac2adcea4bc5b2b7af23 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 20 Mar 2019 16:52:58 +1100 Subject: [PATCH 13/33] op-pool: validate_attestation_time_independent_only --- eth2/operation_pool/src/lib.rs | 8 +- .../src/per_block_processing.rs | 5 +- .../validate_attestation.rs | 108 +++++++++++------- 3 files changed, 75 insertions(+), 46 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index c6af777e6..6d276600a 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -6,8 +6,9 @@ use state_processing::per_block_processing::errors::{ ProposerSlashingValidationError, TransferValidationError, }; use state_processing::per_block_processing::{ - validate_attestation, verify_deposit, verify_exit, verify_exit_time_independent_only, - verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + validate_attestation, validate_attestation_time_independent_only, verify_deposit, verify_exit, + verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, + verify_transfer_time_independent_only, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use types::chain_spec::Domain; @@ -113,8 +114,7 @@ impl OperationPool { spec: &ChainSpec, ) -> Result<(), AttestationValidationError> { // Check that attestation signatures are valid. - // FIXME: should disable the time-dependent checks. - validate_attestation(state, &attestation, spec)?; + validate_attestation_time_independent_only(state, &attestation, spec)?; let id = AttestationId::from_data(&attestation.data, state, spec); diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 55e2c29d0..031b919c1 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -8,7 +8,10 @@ pub use self::verify_attester_slashing::{ gather_attester_slashing_indices, verify_attester_slashing, }; pub use self::verify_proposer_slashing::verify_proposer_slashing; -pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; +pub use validate_attestation::{ + validate_attestation, validate_attestation_time_independent_only, + validate_attestation_without_signature, +}; pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index}; pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; pub use verify_slashable_attestation::verify_slashable_attestation; diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 2143988a4..3b89bec99 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -14,7 +14,16 @@ pub fn validate_attestation( attestation: &Attestation, spec: &ChainSpec, ) -> Result<(), Error> { - validate_attestation_signature_optional(state, attestation, spec, true) + validate_attestation_parametric(state, attestation, spec, true, false) +} + +/// Like `validate_attestation` but doesn't run checks which may become true in future states. 
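+///
+/// In particular, the minimum-inclusion-delay check and the comparison of the attestation's
+/// source epoch and root against the state's justified checkpoint are skipped: the delay check
+/// only becomes satisfiable once the state has advanced, and the justified checkpoint may change
+/// between insertion into the operation pool and inclusion in a block.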
+pub fn validate_attestation_time_independent_only( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + validate_attestation_parametric(state, attestation, spec, true, true) } /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the @@ -28,7 +37,7 @@ pub fn validate_attestation_without_signature( attestation: &Attestation, spec: &ChainSpec, ) -> Result<(), Error> { - validate_attestation_signature_optional(state, attestation, spec, false) + validate_attestation_parametric(state, attestation, spec, false, false) } /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the @@ -36,15 +45,13 @@ pub fn validate_attestation_without_signature( /// /// /// Spec v0.5.0 -fn validate_attestation_signature_optional( +fn validate_attestation_parametric( state: &BeaconState, attestation: &Attestation, spec: &ChainSpec, verify_signature: bool, + time_independent_only: bool, ) -> Result<(), Error> { - let state_epoch = state.slot.epoch(spec.slots_per_epoch); - let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); - // Can't submit pre-historic attestations. verify!( attestation.data.slot >= spec.genesis_slot, @@ -65,7 +72,8 @@ fn validate_attestation_signature_optional( // Can't submit attestation too quickly. verify!( - attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, + time_independent_only + || attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, Invalid::IncludedTooEarly { state: state.slot, delay: spec.min_attestation_inclusion_delay, @@ -74,40 +82,8 @@ fn validate_attestation_signature_optional( ); // Verify the justified epoch and root is correct. - if attestation_epoch >= state_epoch { - verify!( - attestation.data.source_epoch == state.current_justified_epoch, - Invalid::WrongJustifiedEpoch { - state: state.current_justified_epoch, - attestation: attestation.data.source_epoch, - is_current: true, - } - ); - verify!( - attestation.data.source_root == state.current_justified_root, - Invalid::WrongJustifiedRoot { - state: state.current_justified_root, - attestation: attestation.data.source_root, - is_current: true, - } - ); - } else { - verify!( - attestation.data.source_epoch == state.previous_justified_epoch, - Invalid::WrongJustifiedEpoch { - state: state.previous_justified_epoch, - attestation: attestation.data.source_epoch, - is_current: false, - } - ); - verify!( - attestation.data.source_root == state.previous_justified_root, - Invalid::WrongJustifiedRoot { - state: state.previous_justified_root, - attestation: attestation.data.source_root, - is_current: true, - } - ); + if !time_independent_only { + verify_justified_epoch_and_root(attestation, state, spec)?; } // Check that the crosslink data is valid. @@ -188,6 +164,56 @@ fn validate_attestation_signature_optional( Ok(()) } +/// Verify that the `source_epoch` and `source_root` of an `Attestation` correctly +/// match the current (or previous) justified epoch and root from the state. 
+///
+/// Spec v0.5.0
+fn verify_justified_epoch_and_root(
+    attestation: &Attestation,
+    state: &BeaconState,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    let state_epoch = state.slot.epoch(spec.slots_per_epoch);
+    let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
+
+    if attestation_epoch >= state_epoch {
+        verify!(
+            attestation.data.source_epoch == state.current_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.current_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: true,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.current_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.current_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: true,
+            }
+        );
+    } else {
+        verify!(
+            attestation.data.source_epoch == state.previous_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.previous_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: false,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.previous_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.previous_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: false,
+            }
+        );
+    }
+    Ok(())
+}
+
 /// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the
 /// `aggregate_signature` is valid.
 ///

From 22a90a0224c38c7b7913d520d9c90bc0fa5005c1 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Mon, 25 Mar 2019 11:56:30 +1100
Subject: [PATCH 14/33] op-pool: check previous epoch in get_attestations

---
 eth2/operation_pool/src/lib.rs | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs
index 6d276600a..fc894da27 100644
--- a/eth2/operation_pool/src/lib.rs
+++ b/eth2/operation_pool/src/lib.rs
@@ -145,13 +145,17 @@ impl OperationPool {
 
     /// Get a list of attestations for inclusion in a block.
     pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Attestation> {
-        // Attestations for the current fork...
-        // FIXME: should we also check domain bytes for the previous epoch?
-        let current_epoch = state.slot.epoch(spec.slots_per_epoch);
-        let domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec);
+        // Attestations for the current fork, which may be from the current or previous epoch.
+        let prev_epoch = state.previous_epoch(spec);
+        let current_epoch = state.current_epoch(spec);
+        let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec);
+        let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec);
         self.attestations
             .iter()
-            .filter(|(key, _)| key.domain_bytes_match(&domain_bytes))
+            .filter(|(key, _)| {
+                key.domain_bytes_match(&prev_domain_bytes)
+                    || key.domain_bytes_match(&curr_domain_bytes)
+            })
             .flat_map(|(_, attestations)| attestations)
             // That are valid...
.filter(|attestation| validate_attestation(state, attestation, spec).is_ok()) From 0bf8cb953eeaa86662455e0025be0e7f1f4b4808 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 25 Mar 2019 12:44:30 +1100 Subject: [PATCH 15/33] BLS: wrap AggregateSignature::add_aggregate --- eth2/utils/bls/src/aggregate_signature.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 7b80d3bbf..44eafdcdf 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -27,6 +27,11 @@ impl AggregateSignature { self.0.add(signature.as_raw()) } + /// Add (aggregate) another `AggregateSignature`. + pub fn add_aggregate(&mut self, agg_signature: &AggregateSignature) { + self.0.add_aggregate(&agg_signature.0) + } + /// Verify the `AggregateSignature` against an `AggregatePublicKey`. /// /// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys From bde7ab79c800203a1624a014db656017155ff381 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 25 Mar 2019 12:45:24 +1100 Subject: [PATCH 16/33] types: aggregate signatures in attestations --- eth2/types/src/attestation.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 043711015..dabccfde7 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -46,7 +46,8 @@ impl Attestation { self.aggregation_bitfield .union_inplace(&other.aggregation_bitfield); self.custody_bitfield.union_inplace(&other.custody_bitfield); - // FIXME: signature aggregation once our BLS library wraps it + self.aggregate_signature + .add_aggregate(&other.aggregate_signature); } } From 518359e89845a7d16f99253b07560fa8a667e101 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 25 Mar 2019 16:58:20 +1100 Subject: [PATCH 17/33] op-pool: implement attester slashings --- eth2/operation_pool/src/lib.rs | 120 +++++++++++++++--- .../src/per_block_processing.rs | 3 +- .../verify_attester_slashing.rs | 21 ++- 3 files changed, 122 insertions(+), 22 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index fc894da27..fadbf449d 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -2,12 +2,13 @@ use int_to_bytes::int_to_bytes8; use itertools::Itertools; use ssz::ssz_encode; use state_processing::per_block_processing::errors::{ - AttestationValidationError, DepositValidationError, ExitValidationError, - ProposerSlashingValidationError, TransferValidationError, + AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; use state_processing::per_block_processing::{ - validate_attestation, validate_attestation_time_independent_only, verify_deposit, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, + gather_attester_slashing_indices_modular, validate_attestation, + validate_attestation_time_independent_only, verify_attester_slashing, verify_deposit, + verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; @@ -32,8 +33,8 @@ pub struct OperationPool { // and the spec doesn't seem to accomodate for re-orgs on a time-frame // longer than an epoch deposits: 
BTreeMap, - /// Map from attester index to slashing. - attester_slashings: HashMap, + /// Map from two attestation IDs to a slashing for those IDs. + attester_slashings: HashMap<(AttestationId, AttestationId), AttesterSlashing>, /// Map from proposer index to slashing. proposer_slashings: HashMap, /// Map from exiting validator to their exit data. @@ -250,18 +251,44 @@ impl OperationPool { Ok(()) } - /// Only check whether the implicated validator has already been slashed, because - /// all slashings in the pool were validated upon insertion. - // TODO: we need a mechanism to avoid including a proposer slashing and an attester - // slashing for the same validator in the same block - pub fn get_proposer_slashings( + /// Compute the tuple ID that is used to identify an attester slashing. + /// + /// Depends on the fork field of the state, but not on the state's epoch. + fn attester_slashing_id( + slashing: &AttesterSlashing, + state: &BeaconState, + spec: &ChainSpec, + ) -> (AttestationId, AttestationId) { + ( + AttestationId::from_data(&slashing.slashable_attestation_1.data, state, spec), + AttestationId::from_data(&slashing.slashable_attestation_2.data, state, spec), + ) + } + + /// Insert an attester slashing into the pool. + pub fn insert_attester_slashing( + &mut self, + slashing: AttesterSlashing, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), AttesterSlashingValidationError> { + verify_attester_slashing(state, &slashing, true, spec)?; + let id = Self::attester_slashing_id(&slashing, state, spec); + self.attester_slashings.insert(id, slashing); + Ok(()) + } + + /// Get proposer and attester slashings for inclusion in a block. + /// + /// This function computes both types of slashings together, because + /// attester slashings may be invalidated by proposer slashings included + /// earlier in the block. + pub fn get_slashings( &self, state: &BeaconState, spec: &ChainSpec, - ) -> Vec { - // We sort by validator index, which is safe, because a validator can only supply - // so many valid slashings for lower-indexed validators (and even that is unlikely) - filter_limit_operations( + ) -> (Vec, Vec) { + let proposer_slashings = filter_limit_operations( self.proposer_slashings.values(), |slashing| { state @@ -270,10 +297,48 @@ impl OperationPool { .map_or(false, |validator| !validator.slashed) }, spec.max_proposer_slashings, - ) + ); + + // Set of validators to be slashed, so we don't attempt to construct invalid attester + // slashings. + let mut to_be_slashed = proposer_slashings + .iter() + .map(|s| s.proposer_index) + .collect::>(); + + let attester_slashings = self + .attester_slashings + .iter() + .filter(|(id, slashing)| { + // Check the fork. + Self::attester_slashing_id(slashing, state, spec) == **id + }) + .filter(|(_, slashing)| { + // Take all slashings that will slash 1 or more validators. + let slashed_validators = gather_attester_slashing_indices_modular( + state, + slashing, + |index, validator| validator.slashed || to_be_slashed.contains(&index), + spec, + ); + + // Extend the `to_be_slashed` set so subsequent iterations don't try to include + // useless slashings. + if let Ok(validators) = slashed_validators { + to_be_slashed.extend(validators); + true + } else { + false + } + }) + .take(spec.max_attester_slashings as usize) + .map(|(_, slashing)| slashing.clone()) + .collect(); + + (proposer_slashings, attester_slashings) } - /// Prune slashings for all slashed or withdrawn validators. + /// Prune proposer slashings for all slashed or withdrawn validators. 
pub fn prune_proposer_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { prune_validator_hash_map( &mut self.proposer_slashings, @@ -285,7 +350,22 @@ impl OperationPool { ); } - // FIXME: copy ProposerSlashing code for AttesterSlashing + /// Prune attester slashings for all slashed or withdrawn validators, or attestations on another + /// fork. + pub fn prune_attester_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + self.attester_slashings.retain(|id, slashing| { + let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id; + let curr_epoch = finalized_state.current_epoch(spec); + let slashing_ok = gather_attester_slashing_indices_modular( + finalized_state, + slashing, + |_, validator| validator.slashed || validator.is_withdrawable_at(curr_epoch), + spec, + ) + .is_ok(); + fork_ok && slashing_ok + }); + } /// Insert a voluntary exit, validating it almost-entirely (future exits are permitted). pub fn insert_voluntary_exit( @@ -359,13 +439,13 @@ impl OperationPool { self.prune_attestations(finalized_state, spec); self.prune_deposits(finalized_state); self.prune_proposer_slashings(finalized_state, spec); - // FIXME: add attester slashings + self.prune_attester_slashings(finalized_state, spec); self.prune_voluntary_exits(finalized_state, spec); self.prune_transfers(finalized_state); } } -/// Filter up to a maximum number of operations out of a slice. +/// Filter up to a maximum number of operations out of an iterator. fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec where I: IntoIterator, diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 031b919c1..e79f5f08c 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -5,7 +5,8 @@ use ssz::{SignedRoot, TreeHash}; use types::*; pub use self::verify_attester_slashing::{ - gather_attester_slashing_indices, verify_attester_slashing, + gather_attester_slashing_indices, gather_attester_slashing_indices_modular, + verify_attester_slashing, }; pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use validate_attestation::{ diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index a198d2a3e..abf99da64 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -47,6 +47,25 @@ pub fn gather_attester_slashing_indices( attester_slashing: &AttesterSlashing, spec: &ChainSpec, ) -> Result, Error> { + gather_attester_slashing_indices_modular( + state, + attester_slashing, + |_, validator| validator.slashed, + spec, + ) +} + +/// Same as `gather_attester_slashing_indices` but allows the caller to specify the criteria +/// for determining whether a given validator should be considered slashed. 
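+///
+/// For example, `OperationPool::get_slashings` passes a closure that also treats validators
+/// already selected for slashing earlier in the same block as slashed (sketch, where
+/// `to_be_slashed` is the `HashSet<u64>` built from the chosen proposer slashings):
+///
+/// ```ignore
+/// gather_attester_slashing_indices_modular(
+///     state,
+///     slashing,
+///     |index, validator| validator.slashed || to_be_slashed.contains(&index),
+///     spec,
+/// )
+/// ```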
+pub fn gather_attester_slashing_indices_modular( + state: &BeaconState, + attester_slashing: &AttesterSlashing, + is_slashed: F, + spec: &ChainSpec, +) -> Result, Error> +where + F: Fn(u64, &Validator) -> bool, +{ let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; @@ -57,7 +76,7 @@ pub fn gather_attester_slashing_indices( .get(*i as usize) .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; - if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed { + if slashable_attestation_2.validator_indices.contains(&i) & !is_slashed(*i, validator) { // TODO: verify that we should reject any slashable attestation which includes a // withdrawn validator. PH has asked the question on gitter, awaiting response. verify!( From 99dbed86f150e27aed61206c2683553183b56d43 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 26 Mar 2019 18:20:01 +1100 Subject: [PATCH 18/33] types: PendingAttestation::from_attestation --- eth2/state_processing/src/per_block_processing.rs | 8 +------- eth2/types/src/pending_attestation.rs | 14 +++++++++++++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index e79f5f08c..6c52a2676 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -322,13 +322,7 @@ pub fn process_attestations( // Update the state in series. for attestation in attestations { - let pending_attestation = PendingAttestation { - data: attestation.data.clone(), - aggregation_bitfield: attestation.aggregation_bitfield.clone(), - custody_bitfield: attestation.custody_bitfield.clone(), - inclusion_slot: state.slot, - }; - + let pending_attestation = PendingAttestation::from_attestation(attestation, state.slot); let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); if attestation_epoch == state.current_epoch(spec) { diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index ca50b6d1c..938e59bef 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{AttestationData, Bitfield, Slot}; +use crate::{Attestation, AttestationData, Bitfield, Slot}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; @@ -16,6 +16,18 @@ pub struct PendingAttestation { pub inclusion_slot: Slot, } +impl PendingAttestation { + /// Create a `PendingAttestation` from an `Attestation`, at the given `inclusion_slot`. 
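+    ///
+    /// Sketch of the intended call site in `process_attestations`:
+    /// `let pending_attestation = PendingAttestation::from_attestation(attestation, state.slot);`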
+ pub fn from_attestation(attestation: &Attestation, inclusion_slot: Slot) -> Self { + PendingAttestation { + data: attestation.data.clone(), + aggregation_bitfield: attestation.aggregation_bitfield.clone(), + custody_bitfield: attestation.custody_bitfield.clone(), + inclusion_slot, + } + } +} + #[cfg(test)] mod tests { use super::*; From e5a3b3dd067773ae1c31d9bfa27c888e9505b669 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 26 Mar 2019 18:29:02 +1100 Subject: [PATCH 19/33] op-pool: attestation tests --- eth2/operation_pool/src/lib.rs | 290 ++++++++++++++++++++++++++++++++- 1 file changed, 289 insertions(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index fadbf449d..e67a201c7 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -144,6 +144,11 @@ impl OperationPool { Ok(()) } + /// Total number of attestations in the pool, including attestations for the same data. + pub fn num_attestations(&self) -> usize { + self.attestations.values().map(|atts| atts.len()).sum() + } + /// Get a list of attestations for inclusion in a block. pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { // Attestations for the current fork, which may be from the current or previous epoch. @@ -161,6 +166,7 @@ impl OperationPool { // That are valid... .filter(|attestation| validate_attestation(state, attestation, spec).is_ok()) // Scored by the number of new attestations they introduce (descending) + // TODO: need to consider attestations introduced in THIS block .map(|att| (att, attestation_score(att, state))) .sorted_by_key(|&(_, score)| std::cmp::Reverse(score)) // Limited to the maximum number of attestations per block @@ -484,7 +490,7 @@ fn prune_validator_hash_map( mod tests { use super::DepositInsertStatus::*; use super::*; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use types::test_utils::*; use types::*; #[test] @@ -636,5 +642,287 @@ mod tests { (spec, state) } + /// Create a signed attestation for use in tests. + /// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`. + fn signed_attestation>( + committee: &CrosslinkCommittee, + keypairs: &[Keypair], + signing_range: R, + slot: Slot, + state: &BeaconState, + spec: &ChainSpec, + extra_signer: Option, + ) -> Attestation { + let mut builder = TestingAttestationBuilder::new( + state, + &committee.committee, + slot, + committee.shard, + spec, + ); + let signers = &committee.committee[signing_range]; + let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::>(); + builder.sign(signers, &committee_keys, &state.fork, spec); + extra_signer.map(|c_idx| { + let validator_index = committee.committee[c_idx]; + builder.sign( + &[validator_index], + &[&keypairs[validator_index].sk], + &state.fork, + spec, + ) + }); + builder.build() + } + + /// Test state for attestation-related tests. + fn attestation_test_state( + spec: &ChainSpec, + num_committees: usize, + ) -> (BeaconState, Vec) { + let num_validators = + num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize; + let mut state_builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, spec); + let slot_offset = 100000; + let slot = spec.genesis_slot + slot_offset; + state_builder.teleport_to_slot(slot, spec); + state_builder.build_caches(spec).unwrap(); + state_builder.build() + } + + /// Set the latest crosslink in the state to match the attestation. 
+ fn fake_latest_crosslink(att: &Attestation, state: &mut BeaconState, spec: &ChainSpec) { + state.latest_crosslinks[att.data.shard as usize] = Crosslink { + crosslink_data_root: att.data.crosslink_data_root, + epoch: att.data.slot.epoch(spec.slots_per_epoch), + }; + } + + #[test] + fn test_attestation_score() { + let spec = &ChainSpec::foundation(); + let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); + let slot = state.slot - 1; + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + for committee in committees { + let att1 = signed_attestation(&committee, keypairs, ..2, slot, state, spec, None); + let att2 = signed_attestation(&committee, keypairs, .., slot, state, spec, None); + + assert_eq!( + att1.aggregation_bitfield.num_set_bits(), + attestation_score(&att1, state) + ); + + state + .current_epoch_attestations + .push(PendingAttestation::from_attestation(&att1, state.slot)); + + assert_eq!( + committee.committee.len() - 2, + attestation_score(&att2, &state) + ); + } + } + + /// End-to-end test of basic attestation handling. + #[test] + fn attestation_aggregation_insert_get_prune() { + let spec = &ChainSpec::foundation(); + let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); + let mut op_pool = OperationPool::new(); + + let slot = state.slot - 1; + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + assert_eq!( + committees.len(), + 1, + "we expect just one committee with this many validators" + ); + + for committee in &committees { + let step_size = 2; + for i in (0..committee.committee.len()).step_by(step_size) { + let att = signed_attestation( + committee, + keypairs, + i..i + step_size, + slot, + state, + spec, + None, + ); + fake_latest_crosslink(&att, state, spec); + op_pool.insert_attestation(att, state, spec).unwrap(); + } + } + + assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!(op_pool.num_attestations(), committees.len()); + + // Before the min attestation inclusion delay, get_attestations shouldn't return anything. + assert_eq!(op_pool.get_attestations(state, spec).len(), 0); + + // Then once the delay has elapsed, we should get a single aggregated attestation. + state.slot += spec.min_attestation_inclusion_delay; + + let block_attestations = op_pool.get_attestations(state, spec); + assert_eq!(block_attestations.len(), committees.len()); + + let agg_att = &block_attestations[0]; + assert_eq!( + agg_att.aggregation_bitfield.num_set_bits(), + spec.target_committee_size as usize + ); + + // Prune attestations shouldn't do anything at this point. + op_pool.prune_attestations(state, spec); + assert_eq!(op_pool.num_attestations(), committees.len()); + + // But once we advance to an epoch after the attestation, it should prune it out of + // existence. + state.slot = slot + spec.slots_per_epoch; + op_pool.prune_attestations(state, spec); + assert_eq!(op_pool.num_attestations(), 0); + } + + /// Adding an attestation already in the pool should not increase the size of the pool. 
+ #[test] + fn attestation_duplicate() { + let spec = &ChainSpec::foundation(); + let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); + let mut op_pool = OperationPool::new(); + + let slot = state.slot - 1; + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + for committee in &committees { + let att = signed_attestation(committee, keypairs, .., slot, state, spec, None); + fake_latest_crosslink(&att, state, spec); + op_pool + .insert_attestation(att.clone(), state, spec) + .unwrap(); + op_pool.insert_attestation(att, state, spec).unwrap(); + } + + assert_eq!(op_pool.num_attestations(), committees.len()); + } + + /// Adding lots of attestations that only intersect pairwise should lead to two aggregate + /// attestations. + #[test] + fn attestation_pairwise_overlapping() { + let spec = &ChainSpec::foundation(); + let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); + let mut op_pool = OperationPool::new(); + + let slot = state.slot - 1; + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + let step_size = 2; + for committee in &committees { + // Create attestations that overlap on `step_size` validators, like: + // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... + for i in (0..committee.committee.len() - step_size).step_by(step_size) { + let att = signed_attestation( + committee, + keypairs, + i..i + 2 * step_size, + slot, + state, + spec, + None, + ); + fake_latest_crosslink(&att, state, spec); + op_pool.insert_attestation(att, state, spec).unwrap(); + } + } + + // The attestations should get aggregated into two attestations that comprise all + // validators. + assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!(op_pool.num_attestations(), 2 * committees.len()); + } + + /// Create a bunch of attestations signed by a small number of validators, and another + /// bunch signed by a larger number, such that there are at least `max_attestations` + /// signed by the larger number. Then, check that `get_attestations` only returns the + /// high-quality attestations. To ensure that no aggregation occurs, ALL attestations + /// are also signed by the 0th member of the committee. 
+ #[test] + fn attestation_get_max() { + let spec = &ChainSpec::foundation(); + let small_step_size = 2; + let big_step_size = 4; + let (ref mut state, ref keypairs) = attestation_test_state(spec, big_step_size); + let mut op_pool = OperationPool::new(); + + let slot = state.slot - 1; + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + let max_attestations = spec.max_attestations as usize; + let target_committee_size = spec.target_committee_size as usize; + + let mut insert_attestations = |committee, step_size| { + for i in (0..target_committee_size).step_by(step_size) { + let att = signed_attestation( + committee, + keypairs, + i..i + step_size, + slot, + state, + spec, + if i == 0 { None } else { Some(0) }, + ); + fake_latest_crosslink(&att, state, spec); + op_pool.insert_attestation(att, state, spec).unwrap(); + } + }; + + for committee in &committees { + assert_eq!(committee.committee.len(), target_committee_size); + // Attestations signed by only 2-3 validators + insert_attestations(committee, small_step_size); + // Attestations signed by 4+ validators + insert_attestations(committee, big_step_size); + } + + let num_small = target_committee_size / small_step_size; + let num_big = target_committee_size / big_step_size; + + assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!( + op_pool.num_attestations(), + (num_small + num_big) * committees.len() + ); + assert!(op_pool.num_attestations() > max_attestations); + + state.slot += spec.min_attestation_inclusion_delay; + let best_attestations = op_pool.get_attestations(state, spec); + assert_eq!(best_attestations.len(), max_attestations); + + // All the best attestations should be signed by at least `big_step_size` (4) validators. + for att in &best_attestations { + assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size); + } + } + // TODO: more tests } From dd2351020cdf975ff8f20b003d4348502575ef72 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 17:58:02 +1100 Subject: [PATCH 20/33] Impl `add_aggregate` for FakeAggSig --- eth2/utils/bls/src/fake_aggregate_signature.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 23e2b54ef..85b06b956 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -35,6 +35,11 @@ impl FakeAggregateSignature { // Do nothing. } + /// Does glorious nothing. + pub fn add_aggregate(&mut self, _agg_sig: &FakeAggregateSignature) { + // Do nothing. + } + /// _Always_ returns `true`. 
     pub fn verify(
         &self,
 
From 46a978a5a92effb1bde7ce74a1e94308e038f0b1 Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Fri, 29 Mar 2019 18:30:03 +1100
Subject: [PATCH 21/33] Implement op pool for all ops except attestations

---
 beacon_node/beacon_chain/Cargo.toml          |   1 +
 beacon_node/beacon_chain/src/beacon_chain.rs | 258 ++++---------------
 eth2/operation_pool/src/lib.rs               |   2 +-
 3 files changed, 51 insertions(+), 210 deletions(-)

diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml
index b5471be5f..55d4bacfd 100644
--- a/beacon_node/beacon_chain/Cargo.toml
+++ b/beacon_node/beacon_chain/Cargo.toml
@@ -15,6 +15,7 @@ hashing = { path = "../../eth2/utils/hashing" }
 fork_choice = { path = "../../eth2/fork_choice" }
 parking_lot = "0.7"
 log = "0.4"
+operation_pool = { path = "../../eth2/operation_pool" }
 env_logger = "0.6"
 serde = "1.0"
 serde_derive = "1.0"
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 27398b6c9..1d0cfe8e3 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -6,7 +6,8 @@ use db::{
     ClientDB, DBError,
 };
 use fork_choice::{ForkChoice, ForkChoiceError};
-use log::{debug, trace};
+use log::{debug, trace, warn};
+use operation_pool::OperationPool;
 use parking_lot::{RwLock, RwLockReadGuard};
 use slot_clock::SlotClock;
 use ssz::ssz_encode;
@@ -83,6 +84,7 @@ pub struct BeaconChain {
     pub state_store: Arc<BeaconStateStore<T>>,
     pub slot_clock: U,
     pub attestation_aggregator: RwLock<AttestationAggregator>,
+    pub op_pool: RwLock<OperationPool>,
     pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
     pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
     pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
@@ -141,6 +143,7 @@ where
             state_store,
             slot_clock,
             attestation_aggregator,
+            op_pool: RwLock::new(OperationPool::new()),
             deposits_for_inclusion: RwLock::new(vec![]),
             exits_for_inclusion: RwLock::new(vec![]),
             transfers_for_inclusion: RwLock::new(vec![]),
@@ -540,218 +543,48 @@ where
 
     /// Accept some deposit and queue it for inclusion in an appropriate block.
     pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
-        // TODO: deposits are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.deposits_for_inclusion.write().push(deposit);
-    }
-
-    /// Return a vec of deposits suitable for inclusion in some block.
-    pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
-        // TODO: deposits are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.deposits_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Deposits` are not included twice in successive blocks.
-    pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_deposits {
-            for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let deposits_for_inclusion = &mut self.deposits_for_inclusion.write();
-        for i in indices_to_delete {
-            deposits_for_inclusion.remove(i);
-        }
+        // Bad deposits are ignored.
+ let _ = self + .op_pool + .write() + .insert_deposit(deposit, &*self.state.read(), &self.spec); } /// Accept some exit and queue it for inclusion in an appropriate block. pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) { - // TODO: exits are not checked for validity; check them. - // - // https://github.com/sigp/lighthouse/issues/276 - self.exits_for_inclusion.write().push(exit); - } - - /// Return a vec of exits suitable for inclusion in some block. - pub fn get_exits_for_block(&self) -> Vec { - // TODO: exits are indiscriminately included; check them for validity. - // - // https://github.com/sigp/lighthouse/issues/275 - self.exits_for_inclusion.read().clone() - } - - /// Takes a list of `Deposits` that were included in recent blocks and removes them from the - /// inclusion queue. - /// - /// This ensures that `Deposits` are not included twice in successive blocks. - pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) { - // TODO: method does not take forks into account; consider this. - let mut indices_to_delete = vec![]; - - for included in included_exits { - for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() { - if included == for_inclusion { - indices_to_delete.push(i); - } - } - } - - let exits_for_inclusion = &mut self.exits_for_inclusion.write(); - for i in indices_to_delete { - exits_for_inclusion.remove(i); - } + // Bad exits are ignored + let _ = self + .op_pool + .write() + .insert_voluntary_exit(exit, &*self.state.read(), &self.spec); } /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) { - // TODO: transfers are not checked for validity; check them. - // - // https://github.com/sigp/lighthouse/issues/276 - self.transfers_for_inclusion.write().push(transfer); - } - - /// Return a vec of transfers suitable for inclusion in some block. - pub fn get_transfers_for_block(&self) -> Vec { - // TODO: transfers are indiscriminately included; check them for validity. - // - // https://github.com/sigp/lighthouse/issues/275 - self.transfers_for_inclusion.read().clone() - } - - /// Takes a list of `Deposits` that were included in recent blocks and removes them from the - /// inclusion queue. - /// - /// This ensures that `Deposits` are not included twice in successive blocks. - pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) { - // TODO: method does not take forks into account; consider this. - let mut indices_to_delete = vec![]; - - for included in included_transfers { - for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() { - if included == for_inclusion { - indices_to_delete.push(i); - } - } - } - - let transfers_for_inclusion = &mut self.transfers_for_inclusion.write(); - for i in indices_to_delete { - transfers_for_inclusion.remove(i); - } + // Bad transfers are ignored. + let _ = self + .op_pool + .write() + .insert_transfer(transfer, &*self.state.read(), &self.spec); } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) { - // TODO: proposer_slashings are not checked for validity; check them. - // - // https://github.com/sigp/lighthouse/issues/276 - self.proposer_slashings_for_inclusion - .write() - .push(proposer_slashing); - } - - /// Return a vec of proposer slashings suitable for inclusion in some block. 
- pub fn get_proposer_slashings_for_block(&self) -> Vec { - // TODO: proposer_slashings are indiscriminately included; check them for validity. - // - // https://github.com/sigp/lighthouse/issues/275 - self.proposer_slashings_for_inclusion.read().clone() - } - - /// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them - /// from the inclusion queue. - /// - /// This ensures that `ProposerSlashings` are not included twice in successive blocks. - pub fn set_proposer_slashings_as_included( - &self, - included_proposer_slashings: &[ProposerSlashing], - ) { - // TODO: method does not take forks into account; consider this. - // - // https://github.com/sigp/lighthouse/issues/275 - let mut indices_to_delete = vec![]; - - for included in included_proposer_slashings { - for (i, for_inclusion) in self - .proposer_slashings_for_inclusion - .read() - .iter() - .enumerate() - { - if included == for_inclusion { - indices_to_delete.push(i); - } - } - } - - let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write(); - for i in indices_to_delete { - proposer_slashings_for_inclusion.remove(i); - } + // Bad proposer slashings are ignored. + let _ = self.op_pool.write().insert_proposer_slashing( + proposer_slashing, + &*self.state.read(), + &self.spec, + ); } /// Accept some attester slashing and queue it for inclusion in an appropriate block. pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) { - // TODO: attester_slashings are not checked for validity; check them. - // - // https://github.com/sigp/lighthouse/issues/276 - self.attester_slashings_for_inclusion - .write() - .push(attester_slashing); - } - - /// Return a vec of attester slashings suitable for inclusion in some block. - pub fn get_attester_slashings_for_block(&self) -> Vec { - // TODO: attester_slashings are indiscriminately included; check them for validity. - // - // https://github.com/sigp/lighthouse/issues/275 - self.attester_slashings_for_inclusion.read().clone() - } - - /// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them - /// from the inclusion queue. - /// - /// This ensures that `AttesterSlashings` are not included twice in successive blocks. - pub fn set_attester_slashings_as_included( - &self, - included_attester_slashings: &[AttesterSlashing], - ) { - // TODO: method does not take forks into account; consider this. - // - // https://github.com/sigp/lighthouse/issues/275 - let mut indices_to_delete = vec![]; - - for included in included_attester_slashings { - for (i, for_inclusion) in self - .attester_slashings_for_inclusion - .read() - .iter() - .enumerate() - { - if included == for_inclusion { - indices_to_delete.push(i); - } - } - } - - let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write(); - for i in indices_to_delete { - attester_slashings_for_inclusion.remove(i); - } + let _ = self.op_pool.write().insert_attester_slashing( + attester_slashing, + &*self.state.read(), + &self.spec, + ); } /// Returns `true` if the given block root has not been processed. @@ -832,13 +665,6 @@ where self.block_store.put(&block_root, &ssz_encode(&block)[..])?; self.state_store.put(&state_root, &ssz_encode(&state)[..])?; - // Update the inclusion queues so they aren't re-submitted. 
- self.set_deposits_as_included(&block.body.deposits[..]); - self.set_transfers_as_included(&block.body.transfers[..]); - self.set_exits_as_included(&block.body.voluntary_exits[..]); - self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]); - self.set_attester_slashings_as_included(&block.body.attester_slashings[..]); - // run the fork_choice add_block logic self.fork_choice .write() @@ -888,6 +714,11 @@ where .get_block_root(state.slot - 1, &self.spec) .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; + let (proposer_slashings, attester_slashings) = self + .op_pool + .read() + .get_slashings(&*self.state.read(), &self.spec); + let mut block = BeaconBlock { slot: state.slot, previous_block_root, @@ -900,12 +731,21 @@ where deposit_root: Hash256::zero(), block_hash: Hash256::zero(), }, - proposer_slashings: self.get_proposer_slashings_for_block(), - attester_slashings: self.get_attester_slashings_for_block(), + proposer_slashings, + attester_slashings, attestations, - deposits: self.get_deposits_for_block(), - voluntary_exits: self.get_exits_for_block(), - transfers: self.get_transfers_for_block(), + deposits: self + .op_pool + .read() + .get_deposits(&*self.state.read(), &self.spec), + voluntary_exits: self + .op_pool + .read() + .get_voluntary_exits(&*self.state.read(), &self.spec), + transfers: self + .op_pool + .read() + .get_transfers(&*self.state.read(), &self.spec), }, }; diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index e67a201c7..c3de95b48 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -21,7 +21,7 @@ use types::{ #[cfg(test)] const VERIFY_DEPOSIT_PROOFS: bool = false; #[cfg(not(test))] -const VERIFY_DEPOSIT_PROOFS: bool = true; +const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this #[derive(Default)] pub struct OperationPool { From 8b1a91e9eec1d11ad833586ad43526eb465c6f21 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 18:40:50 +1100 Subject: [PATCH 22/33] Add `process_attestation` to `BeaconChain` --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1d0cfe8e3..5eadb3763 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -541,6 +541,17 @@ where Ok(aggregation_outcome) } + /// Accept a new attestation from the network. + /// + /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation + /// if possible. + pub fn process_attestation(&self, attestation: Attestation) { + let _ = + self.op_pool + .write() + .insert_attestation(attestation, &*self.state.read(), &self.spec); + } + /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) { // Bad deposits are ignored. 
@@ -701,9 +712,9 @@ where trace!("Finding attestations for new block..."); let attestations = self - .attestation_aggregator + .op_pool .read() - .get_attestations_for_state(&state, &self.spec); + .get_attestations(&*self.state.read(), &self.spec); trace!( "Inserting {} attestation(s) into new block.", From 2b538510623cc8609f542617c948e876a7a348f5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 18:54:01 +1100 Subject: [PATCH 23/33] Pass errors back from block ops processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 69 +++++++++++-------- .../test_harness/src/beacon_chain_harness.rs | 16 +++-- 2 files changed, 53 insertions(+), 32 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5eadb3763..4d3ba9cab 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,10 +7,15 @@ use db::{ }; use fork_choice::{ForkChoice, ForkChoiceError}; use log::{debug, trace, warn}; +use operation_pool::DepositInsertStatus; use operation_pool::OperationPool; use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; use ssz::ssz_encode; +pub use state_processing::per_block_processing::errors::{ + AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + ExitValidationError, ProposerSlashingValidationError, TransferValidationError, +}; use state_processing::{ per_block_processing, per_block_processing_without_verifying_block_signature, per_slot_processing, BlockProcessingError, SlotProcessingError, @@ -545,57 +550,67 @@ where /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. - pub fn process_attestation(&self, attestation: Attestation) { - let _ = - self.op_pool - .write() - .insert_attestation(attestation, &*self.state.read(), &self.spec); + pub fn process_attestation( + &self, + attestation: Attestation, + ) -> Result<(), AttestationValidationError> { + self.op_pool + .write() + .insert_attestation(attestation, &*self.state.read(), &self.spec) } /// Accept some deposit and queue it for inclusion in an appropriate block. - pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) { - // Bad deposits are ignored. - let _ = self - .op_pool + pub fn receive_deposit_for_inclusion( + &self, + deposit: Deposit, + ) -> Result { + self.op_pool .write() - .insert_deposit(deposit, &*self.state.read(), &self.spec); + .insert_deposit(deposit, &*self.state.read(), &self.spec) } /// Accept some exit and queue it for inclusion in an appropriate block. - pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) { - // Bad exits are ignored - let _ = self - .op_pool + pub fn receive_exit_for_inclusion( + &self, + exit: VoluntaryExit, + ) -> Result<(), ExitValidationError> { + self.op_pool .write() - .insert_voluntary_exit(exit, &*self.state.read(), &self.spec); + .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) } /// Accept some transfer and queue it for inclusion in an appropriate block. - pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) { - // Bad transfers are ignored. 
- let _ = self - .op_pool + pub fn receive_transfer_for_inclusion( + &self, + transfer: Transfer, + ) -> Result<(), TransferValidationError> { + self.op_pool .write() - .insert_transfer(transfer, &*self.state.read(), &self.spec); + .insert_transfer(transfer, &*self.state.read(), &self.spec) } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. - pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) { - // Bad proposer slashings are ignored. - let _ = self.op_pool.write().insert_proposer_slashing( + pub fn receive_proposer_slashing_for_inclusion( + &self, + proposer_slashing: ProposerSlashing, + ) -> Result<(), ProposerSlashingValidationError> { + self.op_pool.write().insert_proposer_slashing( proposer_slashing, &*self.state.read(), &self.spec, - ); + ) } /// Accept some attester slashing and queue it for inclusion in an appropriate block. - pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) { - let _ = self.op_pool.write().insert_attester_slashing( + pub fn receive_attester_slashing_for_inclusion( + &self, + attester_slashing: AttesterSlashing, + ) -> Result<(), AttesterSlashingValidationError> { + self.op_pool.write().insert_attester_slashing( attester_slashing, &*self.state.read(), &self.spec, - ); + ) } /// Returns `true` if the given block root has not been processed. diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 942497476..0784e5fd3 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -285,7 +285,9 @@ impl BeaconChainHarness { /// If a new `ValidatorHarness` was created, the validator should become fully operational as /// if the validator were created during `BeaconChainHarness` instantiation. pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option) { - self.beacon_chain.receive_deposit_for_inclusion(deposit); + self.beacon_chain + .receive_deposit_for_inclusion(deposit) + .unwrap(); // If a keypair is present, add a new `ValidatorHarness` to the rig. if let Some(keypair) = keypair { @@ -301,24 +303,28 @@ impl BeaconChainHarness { /// will stop receiving duties from the beacon chain and just do nothing when prompted to /// produce/attest. pub fn add_exit(&mut self, exit: VoluntaryExit) { - self.beacon_chain.receive_exit_for_inclusion(exit); + self.beacon_chain.receive_exit_for_inclusion(exit).unwrap(); } /// Submit an transfer to the `BeaconChain` for inclusion in some block. pub fn add_transfer(&mut self, transfer: Transfer) { - self.beacon_chain.receive_transfer_for_inclusion(transfer); + self.beacon_chain + .receive_transfer_for_inclusion(transfer) + .unwrap(); } /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block. pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) { self.beacon_chain - .receive_proposer_slashing_for_inclusion(proposer_slashing); + .receive_proposer_slashing_for_inclusion(proposer_slashing) + .unwrap(); } /// Submit an attester slashing to the `BeaconChain` for inclusion in some block. 
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) { self.beacon_chain - .receive_attester_slashing_for_inclusion(attester_slashing); + .receive_attester_slashing_for_inclusion(attester_slashing) + .unwrap(); } /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head. From 8bf7a83f373a34070fb7765daaa7dd20fd28867c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 19:09:01 +1100 Subject: [PATCH 24/33] Rename op processing methods on BeaconChain --- beacon_node/beacon_chain/src/beacon_chain.rs | 28 ++++++++----------- .../test_harness/src/beacon_chain_harness.rs | 14 ++++------ 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4d3ba9cab..d3b9e2bdc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6,7 +6,7 @@ use db::{ ClientDB, DBError, }; use fork_choice::{ForkChoice, ForkChoiceError}; -use log::{debug, trace, warn}; +use log::{debug, trace}; use operation_pool::DepositInsertStatus; use operation_pool::OperationPool; use parking_lot::{RwLock, RwLockReadGuard}; @@ -560,7 +560,7 @@ where } /// Accept some deposit and queue it for inclusion in an appropriate block. - pub fn receive_deposit_for_inclusion( + pub fn process_deposit( &self, deposit: Deposit, ) -> Result { @@ -570,27 +570,21 @@ where } /// Accept some exit and queue it for inclusion in an appropriate block. - pub fn receive_exit_for_inclusion( - &self, - exit: VoluntaryExit, - ) -> Result<(), ExitValidationError> { + pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { self.op_pool .write() .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) } /// Accept some transfer and queue it for inclusion in an appropriate block. - pub fn receive_transfer_for_inclusion( - &self, - transfer: Transfer, - ) -> Result<(), TransferValidationError> { + pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { self.op_pool .write() .insert_transfer(transfer, &*self.state.read(), &self.spec) } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. - pub fn receive_proposer_slashing_for_inclusion( + pub fn process_proposer_slashing( &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { @@ -602,7 +596,7 @@ where } /// Accept some attester slashing and queue it for inclusion in an appropriate block. - pub fn receive_attester_slashing_for_inclusion( + pub fn process_attester_slashing( &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { @@ -613,11 +607,6 @@ where ) } - /// Returns `true` if the given block root has not been processed. - pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - Ok(!self.block_store.exists(beacon_block_root)?) - } - /// Accept some block and attempt to add it to block DAG. /// /// Will accept blocks from prior slots, however it will reject any block from a future slot. @@ -817,6 +806,11 @@ where Ok(()) } + /// Returns `true` if the given block root has not been processed. + pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { + Ok(!self.block_store.exists(beacon_block_root)?) + } + /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. 
/// /// This could be a very expensive operation and should only be done in testing/analysis diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 0784e5fd3..7d6a690e0 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -285,9 +285,7 @@ impl BeaconChainHarness { /// If a new `ValidatorHarness` was created, the validator should become fully operational as /// if the validator were created during `BeaconChainHarness` instantiation. pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option) { - self.beacon_chain - .receive_deposit_for_inclusion(deposit) - .unwrap(); + self.beacon_chain.process_deposit(deposit).unwrap(); // If a keypair is present, add a new `ValidatorHarness` to the rig. if let Some(keypair) = keypair { @@ -303,27 +301,25 @@ impl BeaconChainHarness { /// will stop receiving duties from the beacon chain and just do nothing when prompted to /// produce/attest. pub fn add_exit(&mut self, exit: VoluntaryExit) { - self.beacon_chain.receive_exit_for_inclusion(exit).unwrap(); + self.beacon_chain.process_voluntary_exit(exit).unwrap(); } /// Submit an transfer to the `BeaconChain` for inclusion in some block. pub fn add_transfer(&mut self, transfer: Transfer) { - self.beacon_chain - .receive_transfer_for_inclusion(transfer) - .unwrap(); + self.beacon_chain.process_transfer(transfer).unwrap(); } /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block. pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) { self.beacon_chain - .receive_proposer_slashing_for_inclusion(proposer_slashing) + .process_proposer_slashing(proposer_slashing) .unwrap(); } /// Submit an attester slashing to the `BeaconChain` for inclusion in some block. 
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) { self.beacon_chain - .receive_attester_slashing_for_inclusion(attester_slashing) + .process_attester_slashing(attester_slashing) .unwrap(); } From 1840248af8497e9704948bf01d862676ebd25956 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 12:00:31 +1100 Subject: [PATCH 25/33] Remove old queues from BeaconChain --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d3b9e2bdc..41fcb1128 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -90,11 +90,6 @@ pub struct BeaconChain { pub slot_clock: U, pub attestation_aggregator: RwLock, pub op_pool: RwLock, - pub deposits_for_inclusion: RwLock>, - pub exits_for_inclusion: RwLock>, - pub transfers_for_inclusion: RwLock>, - pub proposer_slashings_for_inclusion: RwLock>, - pub attester_slashings_for_inclusion: RwLock>, canonical_head: RwLock, finalized_head: RwLock, pub state: RwLock, @@ -149,11 +144,6 @@ where slot_clock, attestation_aggregator, op_pool: RwLock::new(OperationPool::new()), - deposits_for_inclusion: RwLock::new(vec![]), - exits_for_inclusion: RwLock::new(vec![]), - transfers_for_inclusion: RwLock::new(vec![]), - proposer_slashings_for_inclusion: RwLock::new(vec![]), - attester_slashings_for_inclusion: RwLock::new(vec![]), state: RwLock::new(genesis_state), finalized_head, canonical_head, From cd9494181c80d5c1af40246ff9033674d2246e64 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 12:26:25 +1100 Subject: [PATCH 26/33] Push RwLock down into OperationPool There used to be one massive lock on `BeaconChain.op_pool`, however that would cause unnecessary blocking. --- beacon_node/beacon_chain/src/beacon_chain.rs | 51 ++------- eth2/operation_pool/Cargo.toml | 1 + eth2/operation_pool/src/lib.rs | 113 ++++++++++--------- 3 files changed, 75 insertions(+), 90 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 41fcb1128..5e83fdd81 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -89,7 +89,7 @@ pub struct BeaconChain { pub state_store: Arc>, pub slot_clock: U, pub attestation_aggregator: RwLock, - pub op_pool: RwLock, + pub op_pool: OperationPool, canonical_head: RwLock, finalized_head: RwLock, pub state: RwLock, @@ -143,7 +143,7 @@ where state_store, slot_clock, attestation_aggregator, - op_pool: RwLock::new(OperationPool::new()), + op_pool: OperationPool::new(), state: RwLock::new(genesis_state), finalized_head, canonical_head, @@ -545,7 +545,6 @@ where attestation: Attestation, ) -> Result<(), AttestationValidationError> { self.op_pool - .write() .insert_attestation(attestation, &*self.state.read(), &self.spec) } @@ -555,21 +554,18 @@ where deposit: Deposit, ) -> Result { self.op_pool - .write() .insert_deposit(deposit, &*self.state.read(), &self.spec) } /// Accept some exit and queue it for inclusion in an appropriate block. pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { self.op_pool - .write() .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) } /// Accept some transfer and queue it for inclusion in an appropriate block. 
pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { self.op_pool - .write() .insert_transfer(transfer, &*self.state.read(), &self.spec) } @@ -578,11 +574,8 @@ where &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - self.op_pool.write().insert_proposer_slashing( - proposer_slashing, - &*self.state.read(), - &self.spec, - ) + self.op_pool + .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec) } /// Accept some attester slashing and queue it for inclusion in an appropriate block. @@ -590,11 +583,8 @@ where &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - self.op_pool.write().insert_attester_slashing( - attester_slashing, - &*self.state.read(), - &self.spec, - ) + self.op_pool + .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) } /// Accept some block and attempt to add it to block DAG. @@ -705,24 +695,12 @@ where trace!("Finding attestations for new block..."); - let attestations = self - .op_pool - .read() - .get_attestations(&*self.state.read(), &self.spec); - - trace!( - "Inserting {} attestation(s) into new block.", - attestations.len() - ); - let previous_block_root = *state .get_block_root(state.slot - 1, &self.spec) .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; - let (proposer_slashings, attester_slashings) = self - .op_pool - .read() - .get_slashings(&*self.state.read(), &self.spec); + let (proposer_slashings, attester_slashings) = + self.op_pool.get_slashings(&*self.state.read(), &self.spec); let mut block = BeaconBlock { slot: state.slot, @@ -738,19 +716,14 @@ where }, proposer_slashings, attester_slashings, - attestations, - deposits: self + attestations: self .op_pool - .read() - .get_deposits(&*self.state.read(), &self.spec), + .get_attestations(&*self.state.read(), &self.spec), + deposits: self.op_pool.get_deposits(&*self.state.read(), &self.spec), voluntary_exits: self .op_pool - .read() .get_voluntary_exits(&*self.state.read(), &self.spec), - transfers: self - .op_pool - .read() - .get_transfers(&*self.state.read(), &self.spec), + transfers: self.op_pool.get_transfers(&*self.state.read(), &self.spec), }, }; diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index 07cb61864..67d13013c 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] int_to_bytes = { path = "../utils/int_to_bytes" } itertools = "0.8" +parking_lot = "0.7" types = { path = "../types" } state_processing = { path = "../state_processing" } ssz = { path = "../utils/ssz" } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index c3de95b48..c42527b60 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -1,5 +1,6 @@ use int_to_bytes::int_to_bytes8; use itertools::Itertools; +use parking_lot::RwLock; use ssz::ssz_encode; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, @@ -26,21 +27,21 @@ const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this #[derive(Default)] pub struct OperationPool { /// Map from attestation ID (see below) to vectors of attestations. - attestations: HashMap>, + attestations: RwLock>>, /// Map from deposit index to deposit data. 
// NOTE: We assume that there is only one deposit per index // because the Eth1 data is updated (at most) once per epoch, // and the spec doesn't seem to accomodate for re-orgs on a time-frame // longer than an epoch - deposits: BTreeMap, + deposits: RwLock>, /// Map from two attestation IDs to a slashing for those IDs. - attester_slashings: HashMap<(AttestationId, AttestationId), AttesterSlashing>, + attester_slashings: RwLock>, /// Map from proposer index to slashing. - proposer_slashings: HashMap, + proposer_slashings: RwLock>, /// Map from exiting validator to their exit data. - voluntary_exits: HashMap, + voluntary_exits: RwLock>, /// Set of transfers. - transfers: HashSet, + transfers: RwLock>, } /// Serialized `AttestationData` augmented with a domain to encode the fork info. @@ -109,7 +110,7 @@ impl OperationPool { /// Insert an attestation into the pool, aggregating it with existing attestations if possible. pub fn insert_attestation( - &mut self, + &self, attestation: Attestation, state: &BeaconState, spec: &ChainSpec, @@ -119,7 +120,10 @@ impl OperationPool { let id = AttestationId::from_data(&attestation.data, state, spec); - let existing_attestations = match self.attestations.entry(id) { + // Take a write lock on the attestations map. + let mut attestations = self.attestations.write(); + + let existing_attestations = match attestations.entry(id) { hash_map::Entry::Vacant(entry) => { entry.insert(vec![attestation]); return Ok(()); @@ -146,7 +150,11 @@ impl OperationPool { /// Total number of attestations in the pool, including attestations for the same data. pub fn num_attestations(&self) -> usize { - self.attestations.values().map(|atts| atts.len()).sum() + self.attestations + .read() + .values() + .map(|atts| atts.len()) + .sum() } /// Get a list of attestations for inclusion in a block. @@ -157,6 +165,7 @@ impl OperationPool { let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec); let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec); self.attestations + .read() .iter() .filter(|(key, _)| { key.domain_bytes_match(&prev_domain_bytes) @@ -180,8 +189,8 @@ impl OperationPool { // TODO: we could probably prune other attestations here: // - ones that are completely covered by attestations included in the state // - maybe ones invalidated by the confirmation of one fork over another - pub fn prune_attestations(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { - self.attestations.retain(|_, attestations| { + pub fn prune_attestations(&self, finalized_state: &BeaconState, spec: &ChainSpec) { + self.attestations.write().retain(|_, attestations| { // All the attestations in this bucket have the same data, so we only need to // check the first one. attestations.first().map_or(false, |att| { @@ -194,14 +203,14 @@ impl OperationPool { /// /// No two distinct deposits should be added with the same index. 
pub fn insert_deposit( - &mut self, + &self, deposit: Deposit, state: &BeaconState, spec: &ChainSpec, ) -> Result { use DepositInsertStatus::*; - match self.deposits.entry(deposit.index) { + match self.deposits.write().entry(deposit.index) { Entry::Vacant(entry) => { verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?; entry.insert(deposit); @@ -224,27 +233,26 @@ impl OperationPool { pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { let start_idx = state.deposit_index; (start_idx..start_idx + spec.max_deposits) - .map(|idx| self.deposits.get(&idx)) + .map(|idx| self.deposits.read().get(&idx).cloned()) .take_while(Option::is_some) .flatten() - .cloned() .collect() } /// Remove all deposits with index less than the deposit index of the latest finalised block. - pub fn prune_deposits(&mut self, state: &BeaconState) -> BTreeMap { - let deposits_keep = self.deposits.split_off(&state.deposit_index); - std::mem::replace(&mut self.deposits, deposits_keep) + pub fn prune_deposits(&self, state: &BeaconState) -> BTreeMap { + let deposits_keep = self.deposits.write().split_off(&state.deposit_index); + std::mem::replace(&mut self.deposits.write(), deposits_keep) } /// The number of deposits stored in the pool. pub fn num_deposits(&self) -> usize { - self.deposits.len() + self.deposits.read().len() } /// Insert a proposer slashing into the pool. pub fn insert_proposer_slashing( - &mut self, + &self, slashing: ProposerSlashing, state: &BeaconState, spec: &ChainSpec, @@ -253,6 +261,7 @@ impl OperationPool { // because they could *become* known later verify_proposer_slashing(&slashing, state, spec)?; self.proposer_slashings + .write() .insert(slashing.proposer_index, slashing); Ok(()) } @@ -273,14 +282,14 @@ impl OperationPool { /// Insert an attester slashing into the pool. pub fn insert_attester_slashing( - &mut self, + &self, slashing: AttesterSlashing, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), AttesterSlashingValidationError> { verify_attester_slashing(state, &slashing, true, spec)?; let id = Self::attester_slashing_id(&slashing, state, spec); - self.attester_slashings.insert(id, slashing); + self.attester_slashings.write().insert(id, slashing); Ok(()) } @@ -295,7 +304,7 @@ impl OperationPool { spec: &ChainSpec, ) -> (Vec, Vec) { let proposer_slashings = filter_limit_operations( - self.proposer_slashings.values(), + self.proposer_slashings.read().values(), |slashing| { state .validator_registry @@ -314,6 +323,7 @@ impl OperationPool { let attester_slashings = self .attester_slashings + .read() .iter() .filter(|(id, slashing)| { // Check the fork. @@ -345,9 +355,9 @@ impl OperationPool { } /// Prune proposer slashings for all slashed or withdrawn validators. - pub fn prune_proposer_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) { prune_validator_hash_map( - &mut self.proposer_slashings, + &mut self.proposer_slashings.write(), |validator| { validator.slashed || validator.is_withdrawable_at(finalized_state.current_epoch(spec)) @@ -358,8 +368,8 @@ impl OperationPool { /// Prune attester slashings for all slashed or withdrawn validators, or attestations on another /// fork. 
- pub fn prune_attester_slashings(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { - self.attester_slashings.retain(|id, slashing| { + pub fn prune_attester_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) { + self.attester_slashings.write().retain(|id, slashing| { let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id; let curr_epoch = finalized_state.current_epoch(spec); let slashing_ok = gather_attester_slashing_indices_modular( @@ -375,29 +385,31 @@ impl OperationPool { /// Insert a voluntary exit, validating it almost-entirely (future exits are permitted). pub fn insert_voluntary_exit( - &mut self, + &self, exit: VoluntaryExit, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), ExitValidationError> { verify_exit_time_independent_only(state, &exit, spec)?; - self.voluntary_exits.insert(exit.validator_index, exit); + self.voluntary_exits + .write() + .insert(exit.validator_index, exit); Ok(()) } /// Get a list of voluntary exits for inclusion in a block. pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { filter_limit_operations( - self.voluntary_exits.values(), + self.voluntary_exits.read().values(), |exit| verify_exit(state, exit, spec).is_ok(), spec.max_voluntary_exits, ) } /// Prune if validator has already exited at the last finalized state. - pub fn prune_voluntary_exits(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState, spec: &ChainSpec) { prune_validator_hash_map( - &mut self.voluntary_exits, + &mut self.voluntary_exits.write(), |validator| validator.is_exited_at(finalized_state.current_epoch(spec)), finalized_state, ); @@ -405,7 +417,7 @@ impl OperationPool { /// Insert a transfer into the pool, checking it for validity in the process. pub fn insert_transfer( - &mut self, + &self, transfer: Transfer, state: &BeaconState, spec: &ChainSpec, @@ -414,7 +426,7 @@ impl OperationPool { // it before we insert into the HashSet, we can't end up with duplicate // transactions. verify_transfer_time_independent_only(state, &transfer, spec)?; - self.transfers.insert(transfer); + self.transfers.write().insert(transfer); Ok(()) } @@ -423,6 +435,7 @@ impl OperationPool { // dependencies between transfers in the same block e.g. A pays B, B pays C pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { self.transfers + .read() .iter() .filter(|transfer| verify_transfer(state, transfer, spec).is_ok()) .sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee)) @@ -432,16 +445,14 @@ impl OperationPool { } /// Prune the set of transfers by removing all those whose slot has already passed. - pub fn prune_transfers(&mut self, finalized_state: &BeaconState) { - self.transfers = self - .transfers - .drain() - .filter(|transfer| transfer.slot > finalized_state.slot) - .collect(); + pub fn prune_transfers(&self, finalized_state: &BeaconState) { + self.transfers + .write() + .retain(|transfer| transfer.slot > finalized_state.slot) } /// Prune all types of transactions given the latest finalized state. 
- pub fn prune_all(&mut self, finalized_state: &BeaconState, spec: &ChainSpec) { + pub fn prune_all(&self, finalized_state: &BeaconState, spec: &ChainSpec) { self.prune_attestations(finalized_state, spec); self.prune_deposits(finalized_state); self.prune_proposer_slashings(finalized_state, spec); @@ -497,7 +508,7 @@ mod tests { fn insert_deposit() { let rng = &mut XorShiftRng::from_seed([42; 16]); let (ref spec, ref state) = test_state(rng); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let deposit1 = make_deposit(rng, state, spec); let mut deposit2 = make_deposit(rng, state, spec); deposit2.index = deposit1.index; @@ -520,7 +531,7 @@ mod tests { fn get_deposits_max() { let rng = &mut XorShiftRng::from_seed([42; 16]); let (spec, mut state) = test_state(rng); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let start = 10000; let max_deposits = spec.max_deposits; let extra = 5; @@ -550,7 +561,7 @@ mod tests { fn prune_deposits() { let rng = &mut XorShiftRng::from_seed([42; 16]); let (spec, state) = test_state(rng); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let start1 = 100; // test is super slow in debug mode if this parameter is too high @@ -734,7 +745,7 @@ mod tests { fn attestation_aggregation_insert_get_prune() { let spec = &ChainSpec::foundation(); let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let slot = state.slot - 1; let committees = state @@ -765,7 +776,7 @@ mod tests { } } - assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!(op_pool.attestations.read().len(), committees.len()); assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. @@ -799,7 +810,7 @@ mod tests { fn attestation_duplicate() { let spec = &ChainSpec::foundation(); let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let slot = state.slot - 1; let committees = state @@ -825,7 +836,7 @@ mod tests { fn attestation_pairwise_overlapping() { let spec = &ChainSpec::foundation(); let (ref mut state, ref keypairs) = attestation_test_state(spec, 1); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let slot = state.slot - 1; let committees = state @@ -854,7 +865,7 @@ mod tests { // The attestations should get aggregated into two attestations that comprise all // validators. 
- assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!(op_pool.attestations.read().len(), committees.len()); assert_eq!(op_pool.num_attestations(), 2 * committees.len()); } @@ -869,7 +880,7 @@ mod tests { let small_step_size = 2; let big_step_size = 4; let (ref mut state, ref keypairs) = attestation_test_state(spec, big_step_size); - let mut op_pool = OperationPool::new(); + let op_pool = OperationPool::new(); let slot = state.slot - 1; let committees = state @@ -907,7 +918,7 @@ mod tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; - assert_eq!(op_pool.attestations.len(), committees.len()); + assert_eq!(op_pool.attestations.read().len(), committees.len()); assert_eq!( op_pool.num_attestations(), (num_small + num_big) * committees.len() From 89cc92572a1764be16c12dcdd8b205fea1770b23 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 13:03:05 +1100 Subject: [PATCH 27/33] Add `test_harness` tests for attestation count --- .../specs/validator_registry.yaml | 2 ++ .../test_harness/src/test_case/state_check.rs | 27 +++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml index 0c4f5004b..1674ecffc 100644 --- a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml +++ b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml @@ -47,6 +47,8 @@ test_cases: states: - slot: 63 num_validators: 1003 + num_previous_epoch_attestations: 0 + num_current_epoch_attestations: 10 slashed_validators: [11, 12, 13, 14, 42] exited_validators: [] exit_initiated_validators: [50] diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs b/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs index 4d2bfd07d..7ac33c86c 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs @@ -16,6 +16,10 @@ pub struct StateCheck { pub slot: Slot, /// Checked against `beacon_state.validator_registry.len()`. pub num_validators: Option, + /// The number of pending attestations from the previous epoch that should be in the state. + pub num_previous_epoch_attestations: Option, + /// The number of pending attestations from the current epoch that should be in the state. + pub num_current_epoch_attestations: Option, /// A list of validator indices which have been penalized. Must be in ascending order. pub slashed_validators: Option>, /// A list of validator indices which have been fully exited. Must be in ascending order. @@ -34,6 +38,8 @@ impl StateCheck { Self { slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")), num_validators: as_usize(&yaml, "num_validators"), + num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"), + num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"), slashed_validators: as_vec_u64(&yaml, "slashed_validators"), exited_validators: as_vec_u64(&yaml, "exited_validators"), exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"), @@ -58,6 +64,7 @@ impl StateCheck { "State slot is invalid." 
); + // Check the validator count if let Some(num_validators) = self.num_validators { assert_eq!( state.validator_registry.len(), @@ -67,6 +74,26 @@ impl StateCheck { info!("OK: num_validators = {}.", num_validators); } + // Check the previous epoch attestations + if let Some(n) = self.num_previous_epoch_attestations { + assert_eq!( + state.previous_epoch_attestations.len(), + n, + "previous epoch attestations count != expected." + ); + info!("OK: num_previous_epoch_attestations = {}.", n); + } + + // Check the current epoch attestations + if let Some(n) = self.num_current_epoch_attestations { + assert_eq!( + state.current_epoch_attestations.len(), + n, + "current epoch attestations count != expected." + ); + info!("OK: num_current_epoch_attestations = {}.", n); + } + // Check for slashed validators. if let Some(ref slashed_validators) = self.slashed_validators { let actually_slashed_validators: Vec = state From 397e104f9b4b65649ef60922059d3a4b0f375b3e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 16:02:09 +1100 Subject: [PATCH 28/33] Implement `Attestation` building in test harness --- beacon_node/beacon_chain/src/beacon_chain.rs | 33 +---- .../test_harness/src/beacon_chain_harness.rs | 114 ++++++++++-------- .../validator_harness/direct_beacon_node.rs | 2 +- eth2/types/src/attestation_duty.rs | 2 +- .../generate_deterministic_keypairs.rs | 2 +- 5 files changed, 69 insertions(+), 84 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5e83fdd81..614cc46d8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,4 +1,3 @@ -use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use db::{ @@ -88,7 +87,6 @@ pub struct BeaconChain { pub block_store: Arc>, pub state_store: Arc>, pub slot_clock: U, - pub attestation_aggregator: RwLock, pub op_pool: OperationPool, canonical_head: RwLock, finalized_head: RwLock, @@ -131,7 +129,6 @@ where genesis_state.clone(), state_root, )); - let attestation_aggregator = RwLock::new(AttestationAggregator::new()); genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?; @@ -142,7 +139,6 @@ where block_store, state_store, slot_clock, - attestation_aggregator, op_pool: OperationPool::new(), state: RwLock::new(genesis_state), finalized_head, @@ -477,7 +473,7 @@ where } /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. - pub fn produce_attestation(&self, shard: u64) -> Result { + pub fn produce_attestation_data(&self, shard: u64) -> Result { trace!("BeaconChain::produce_attestation: shard: {}", shard); let source_epoch = self.state.read().current_justified_epoch; let source_root = *self.state.read().get_block_root( @@ -509,33 +505,6 @@ where }) } - /// Validate a `FreeAttestation` and either: - /// - /// - Create a new `Attestation`. - /// - Aggregate it to an existing `Attestation`. 
- pub fn process_free_attestation( - &self, - free_attestation: FreeAttestation, - ) -> Result { - let aggregation_outcome = self - .attestation_aggregator - .write() - .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?; - - // return if the attestation is invalid - if !aggregation_outcome.valid { - return Ok(aggregation_outcome); - } - - // valid attestation, proceed with fork-choice logic - self.fork_choice.write().add_attestation( - free_attestation.validator_index, - &free_attestation.data.beacon_block_root, - &self.spec, - )?; - Ok(aggregation_outcome) - } - /// Accept a new attestation from the network. /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 7d6a690e0..b7acac9e1 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -10,8 +10,6 @@ use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; use ssz::TreeHash; -use std::collections::HashSet; -use std::iter::FromIterator; use std::sync::Arc; use types::{test_utils::TestingBeaconStateBuilder, *}; @@ -137,51 +135,64 @@ impl BeaconChainHarness { slot } - /// Gather the `FreeAttestation`s from the valiators. - /// - /// Note: validators will only produce attestations _once per slot_. So, if you call this twice - /// you'll only get attestations on the first run. - pub fn gather_free_attesations(&mut self) -> Vec { + pub fn gather_attesations(&mut self) -> Vec { let present_slot = self.beacon_chain.present_slot(); + let state = self.beacon_chain.state.read(); - let attesting_validators = self - .beacon_chain - .state - .read() + let mut attestations = vec![]; + + for committee in state .get_crosslink_committees_at_slot(present_slot, &self.spec) .unwrap() - .iter() - .fold(vec![], |mut acc, c| { - acc.append(&mut c.committee.clone()); - acc - }); - let attesting_validators: HashSet = - HashSet::from_iter(attesting_validators.iter().cloned()); + { + for &validator in &committee.committee { + let duties = state + .get_attestation_duties(validator, &self.spec) + .unwrap() + .expect("Attesting validators by definition have duties"); - let free_attestations: Vec = self - .validators - .par_iter_mut() - .enumerate() - .filter_map(|(i, validator)| { - if attesting_validators.contains(&i) { - // Advance the validator slot. - validator.set_slot(present_slot); + // Obtain `AttestationData` from the beacon chain. + let data = self + .beacon_chain + .produce_attestation_data(duties.shard) + .unwrap(); - // Prompt the validator to produce an attestation (if required). - validator.produce_free_attestation().ok() - } else { - None - } - }) - .collect(); + // Produce an aggregate signature with a single signature. 
+ let aggregate_signature = { + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .hash_tree_root(); + let domain = self.spec.get_domain( + state.slot.epoch(self.spec.slots_per_epoch), + Domain::Attestation, + &state.fork, + ); + let sig = + Signature::new(&message, domain, &self.validators[validator].keypair.sk); - debug!( - "Gathered {} FreeAttestations for slot {}.", - free_attestations.len(), - present_slot - ); + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&sig); - free_attestations + agg_sig + }; + + let mut aggregation_bitfield = Bitfield::with_capacity(committee.committee.len()); + let custody_bitfield = Bitfield::with_capacity(committee.committee.len()); + + aggregation_bitfield.set(duties.committee_index, true); + + attestations.push(Attestation { + aggregation_bitfield, + data, + custody_bitfield, + aggregate_signature, + }) + } + } + + attestations } /// Get the block from the proposer for the slot. @@ -200,7 +211,9 @@ impl BeaconChainHarness { // Ensure the validators slot clock is accurate. self.validators[proposer].set_slot(present_slot); - self.validators[proposer].produce_block().unwrap() + let block = self.validators[proposer].produce_block().unwrap(); + + block } /// Advances the chain with a BeaconBlock and attestations from all validators. @@ -219,20 +232,23 @@ impl BeaconChainHarness { }; debug!("...block processed by BeaconChain."); - debug!("Producing free attestations..."); + debug!("Producing attestations..."); // Produce new attestations. - let free_attestations = self.gather_free_attesations(); + let attestations = self.gather_attesations(); - debug!("Processing free attestations..."); + debug!("Processing {} attestations...", attestations.len()); - free_attestations.par_iter().for_each(|free_attestation| { - self.beacon_chain - .process_free_attestation(free_attestation.clone()) - .unwrap(); - }); + attestations + .par_iter() + .enumerate() + .for_each(|(i, attestation)| { + self.beacon_chain + .process_attestation(attestation.clone()) + .expect(&format!("Attestation {} invalid: {:?}", i, attestation)); + }); - debug!("Free attestations processed."); + debug!("Attestations processed."); block } diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs index fde8211ab..7853459d7 100644 --- a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs +++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs @@ -55,7 +55,7 @@ impl AttesterBeaconNode for DirectBeac _slot: Slot, shard: u64, ) -> Result, NodeError> { - match self.beacon_chain.produce_attestation(shard) { + match self.beacon_chain.produce_attestation_data(shard) { Ok(attestation_data) => Ok(Some(attestation_data)), Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))), } diff --git a/eth2/types/src/attestation_duty.rs b/eth2/types/src/attestation_duty.rs index f6e86d263..80d912a83 100644 --- a/eth2/types/src/attestation_duty.rs +++ b/eth2/types/src/attestation_duty.rs @@ -1,7 +1,7 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; -#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { pub slot: Slot, pub shard: Shard, diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs 
b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index f2ce8709e..37880a988 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -19,7 +19,7 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { .collect::>() .par_iter() .map(|&i| { - let secret = int_to_bytes48(i as u64 + 1); + let secret = int_to_bytes48(i as u64 + 1000); let sk = SecretKey::from_bytes(&secret).unwrap(); let pk = PublicKey::from_secret_key(&sk); Keypair { sk, pk } From 7b3f317abfe3439e20f3148596fec58d42c33016 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 17:12:43 +1100 Subject: [PATCH 29/33] Fix bug with attestation production It was being produced with the wrong source root. I will raise an issue on the spec as it's a tricky one. --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 614cc46d8..45a28b782 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -475,11 +475,7 @@ where /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. pub fn produce_attestation_data(&self, shard: u64) -> Result { trace!("BeaconChain::produce_attestation: shard: {}", shard); - let source_epoch = self.state.read().current_justified_epoch; - let source_root = *self.state.read().get_block_root( - source_epoch.start_slot(self.spec.slots_per_epoch), - &self.spec, - )?; + let state = self.state.read(); let target_root = *self.state.read().get_block_root( self.state @@ -500,8 +496,8 @@ where epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch), crosslink_data_root: Hash256::zero(), }, - source_epoch, - source_root, + source_epoch: state.current_justified_epoch, + source_root: state.current_justified_root, }) } From dbcc88ad67aff419a9af00037809d599dc3dc1e9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 17:13:23 +1100 Subject: [PATCH 30/33] Ensure BitVec is initialized using a multiple of 8 I found it was panic-ing when supplied a non-power-of-zero len. --- beacon_node/beacon_chain/src/beacon_chain.rs | 5 ++++- eth2/utils/boolean-bitfield/src/lib.rs | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 45a28b782..7c2336a28 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -692,7 +692,10 @@ where }, }; - trace!("BeaconChain::produce_block: updating state for new block.",); + debug!( + "Produced block with {} attestations, updating state.", + block.body.attestations.len() + ); per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?; diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index cdd0bc3d7..d90b28dc5 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -33,9 +33,11 @@ impl BooleanBitfield { } /// Create a new bitfield with the given length `initial_len` and all values set to `bit`. - pub fn from_elem(inital_len: usize, bit: bool) -> Self { + pub fn from_elem(initial_len: usize, bit: bool) -> Self { + // BitVec can panic if we don't set the len to be a multiple of 8. 
+ let len = ((initial_len + 7) / 8) * 8; Self { - 0: BitVec::from_elem(inital_len, bit), + 0: BitVec::from_elem(len, bit), } } From ed6d0b46d03b088f5074b3b17c723723c19d9437 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 17:16:04 +1100 Subject: [PATCH 31/33] Add committee len to AttesterDuties --- eth2/types/src/attestation_duty.rs | 1 + eth2/types/src/beacon_state/epoch_cache.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/eth2/types/src/attestation_duty.rs b/eth2/types/src/attestation_duty.rs index 80d912a83..299fdd44c 100644 --- a/eth2/types/src/attestation_duty.rs +++ b/eth2/types/src/attestation_duty.rs @@ -6,4 +6,5 @@ pub struct AttestationDuty { pub slot: Slot, pub shard: Shard, pub committee_index: usize, + pub committee_len: usize, } diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 32d9a643e..62df90271 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -92,6 +92,7 @@ impl EpochCache { slot, shard, committee_index: k, + committee_len: crosslink_committee.committee.len(), }; attestation_duties[*validator_index] = Some(attestation_duty) } From 64507950dd1193d3c5032c33afe496b83975e227 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 30 Mar 2019 17:31:58 +1100 Subject: [PATCH 32/33] Use committe_len in test_harness --- .../beacon_chain/test_harness/src/beacon_chain_harness.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index b7acac9e1..33c12d7c7 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -178,8 +178,8 @@ impl BeaconChainHarness { agg_sig }; - let mut aggregation_bitfield = Bitfield::with_capacity(committee.committee.len()); - let custody_bitfield = Bitfield::with_capacity(committee.committee.len()); + let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len); + let custody_bitfield = Bitfield::with_capacity(duties.committee_len); aggregation_bitfield.set(duties.committee_index, true); From ddd9654f7001adb1ac254a9cefa76c297af91a45 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 1 Apr 2019 13:34:43 +1100 Subject: [PATCH 33/33] op-pool: fix bug in attestation_score The attestation scoring function was looking only at the previous epoch, but should really look at whichever epoch is appropriate for a given attestation. We also avoid including attestations that don't pay us any reward, as they simply bloat the chain. --- eth2/operation_pool/src/lib.rs | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index c42527b60..0c5d78fe4 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -74,15 +74,26 @@ impl AttestationId { /// the aggregate attestation introduces, and is proportional to the size of the reward we will /// receive for including it in a block. // TODO: this could be optimised with a map from validator index to whether that validator has -// attested in the *current* epoch. Alternatively, we could cache an index that allows us to -// quickly look up the attestations in the current epoch for a given shard. 
-fn attestation_score(attestation: &Attestation, state: &BeaconState) -> usize { +// attested in each of the current and previous epochs. Currently quadractic in number of validators. +fn attestation_score(attestation: &Attestation, state: &BeaconState, spec: &ChainSpec) -> usize { // Bitfield of validators whose attestations are new/fresh. let mut new_validators = attestation.aggregation_bitfield.clone(); - state - .current_epoch_attestations + let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); + + let state_attestations = if attestation_epoch == state.current_epoch(spec) { + &state.current_epoch_attestations + } else if attestation_epoch == state.previous_epoch(spec) { + &state.previous_epoch_attestations + } else { + return 0; + }; + + state_attestations .iter() + // In a single epoch, an attester should only be attesting for one shard. + // TODO: we avoid including slashable attestations in the state here, + // but maybe we should do something else with them (like construct slashings). .filter(|current_attestation| current_attestation.data.shard == attestation.data.shard) .for_each(|current_attestation| { // Remove the validators who have signed the existing attestation (they are not new) @@ -176,7 +187,9 @@ impl OperationPool { .filter(|attestation| validate_attestation(state, attestation, spec).is_ok()) // Scored by the number of new attestations they introduce (descending) // TODO: need to consider attestations introduced in THIS block - .map(|att| (att, attestation_score(att, state))) + .map(|att| (att, attestation_score(att, state, spec))) + // Don't include any useless attestations (score 0) + .filter(|&(_, score)| score != 0) .sorted_by_key(|&(_, score)| std::cmp::Reverse(score)) // Limited to the maximum number of attestations per block .take(spec.max_attestations as usize) @@ -695,7 +708,7 @@ mod tests { num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize; let mut state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, spec); - let slot_offset = 100000; + let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2; let slot = spec.genesis_slot + slot_offset; state_builder.teleport_to_slot(slot, spec); state_builder.build_caches(spec).unwrap(); @@ -726,7 +739,7 @@ mod tests { assert_eq!( att1.aggregation_bitfield.num_set_bits(), - attestation_score(&att1, state) + attestation_score(&att1, state, spec) ); state @@ -735,7 +748,7 @@ mod tests { assert_eq!( committee.committee.len() - 2, - attestation_score(&att2, &state) + attestation_score(&att2, state, spec) ); } }
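
The scoring change in the final patch is easiest to see in isolation. The following standalone sketch mirrors its shape with toy types -- ToyAttestation, ToyState and plain Vec<bool> bitfields are illustrative stand-ins, not the real `types` crate structures or the actual `attestation_score` signature -- and shows why attestations from the wrong epoch, or with no new attesters, score zero and get left out of a block.

// Standalone sketch (not part of the patch series): epoch-aware attestation scoring
// with simplified, hypothetical types.

type Epoch = u64;

struct ToyAttestation {
    epoch: Epoch,
    shard: u64,
    aggregation_bits: Vec<bool>,
}

struct ToyState {
    current_epoch: Epoch,
    previous_epoch: Epoch,
    current_epoch_attestations: Vec<ToyAttestation>,
    previous_epoch_attestations: Vec<ToyAttestation>,
}

/// Count the attesters in `attestation` that the state has not yet seen for this shard.
/// Attestations from any other epoch score zero.
fn toy_attestation_score(attestation: &ToyAttestation, state: &ToyState) -> usize {
    // Pick the pool of already-included attestations that matches the attestation's
    // own epoch, instead of always looking at one fixed epoch.
    let state_attestations = if attestation.epoch == state.current_epoch {
        &state.current_epoch_attestations
    } else if attestation.epoch == state.previous_epoch {
        &state.previous_epoch_attestations
    } else {
        return 0;
    };

    // Bits for validators whose attestations would be new/fresh in a block.
    let mut new_bits = attestation.aggregation_bits.clone();

    // Clear the bits of validators whose attestations are already in the state.
    for existing in state_attestations
        .iter()
        .filter(|a| a.shard == attestation.shard)
    {
        for (new, seen) in new_bits.iter_mut().zip(&existing.aggregation_bits) {
            if *seen {
                *new = false;
            }
        }
    }

    new_bits.iter().filter(|b| **b).count()
}

fn main() {
    let state = ToyState {
        current_epoch: 11,
        previous_epoch: 10,
        current_epoch_attestations: vec![ToyAttestation {
            epoch: 11,
            shard: 3,
            aggregation_bits: vec![true, false, false, false],
        }],
        previous_epoch_attestations: vec![],
    };

    // Two of the four attesters are new; the first is already in the state.
    let att = ToyAttestation {
        epoch: 11,
        shard: 3,
        aggregation_bits: vec![true, true, true, false],
    };
    assert_eq!(toy_attestation_score(&att, &state), 2);

    // An attestation from an unrelated epoch is worth nothing to the proposer.
    let stale = ToyAttestation {
        epoch: 7,
        shard: 3,
        aggregation_bits: vec![true; 4],
    };
    assert_eq!(toy_attestation_score(&stale, &state), 0);

    println!("scores check out");
}

In the real pool, get_attestations additionally drops any attestation whose score is zero, which is what the `.filter(|&(_, score)| score != 0)` line added in this patch does before sorting by score and taking up to max_attestations.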