Merge pull request #343 from sigp/testnet-client

Testnet client
Paul Hauner 2019-04-05 16:23:59 +11:00 committed by GitHub
commit a46f676f89
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
129 changed files with 17645 additions and 2383 deletions


@@ -3,7 +3,9 @@ members = [
     "eth2/attester",
     "eth2/block_proposer",
     "eth2/fork_choice",
+    "eth2/operation_pool",
     "eth2/state_processing",
+    "eth2/state_processing/yaml_utils",
     "eth2/types",
     "eth2/utils/bls",
     "eth2/utils/boolean-bitfield",


@@ -9,7 +9,7 @@ types = { path = "../eth2/types" }
 client = { path = "client" }
 version = { path = "version" }
 clap = "2.32.0"
-slog = "^2.2.3"
+slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
 slog-term = "^2.4.0"
 slog-async = "^2.3.0"
 ctrlc = { version = "3.1.1", features = ["termination"] }
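For context on the slog change above: these Cargo features set compile-time ceilings on log verbosity, so `max_level_trace` keeps trace-level statements available in debug builds, while `release_max_level_debug` compiles anything finer than debug out of release builds entirely.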


@@ -15,6 +15,7 @@ hashing = { path = "../../eth2/utils/hashing" }
 fork_choice = { path = "../../eth2/fork_choice" }
 parking_lot = "0.7"
 log = "0.4"
+operation_pool = { path = "../../eth2/operation_pool" }
 env_logger = "0.6"
 serde = "1.0"
 serde_derive = "1.0"


@@ -1,218 +0,0 @@
-use ssz::TreeHash;
-use state_processing::per_block_processing::validate_attestation_without_signature;
-use std::collections::{HashMap, HashSet};
-use types::*;
-
-const PHASE_0_CUSTODY_BIT: bool = false;
-
-/// Provides the functionality to:
-///
-/// - Receive a `FreeAttestation` and aggregate it into an `Attestation` (or create a new one if it
-///   doesn't exist).
-/// - Store all aggregated or created `Attestation`s.
-/// - Produce a list of attestations that would be valid for inclusion in some `BeaconState` (and
-///   therefore valid for inclusion in a `BeaconBlock`).
-///
-/// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be
-/// rectified in a future revision.
-#[derive(Default)]
-pub struct AttestationAggregator {
-    store: HashMap<Vec<u8>, Attestation>,
-}
-
-pub struct Outcome {
-    pub valid: bool,
-    pub message: Message,
-}
-
-pub enum Message {
-    /// The free attestation was added to an existing attestation.
-    Aggregated,
-    /// The free attestation has already been aggregated into an existing attestation.
-    AggregationNotRequired,
-    /// The free attestation was transformed into a new attestation.
-    NewAttestationCreated,
-    /// The supplied `validator_index` is not in the committee for the given `shard` and `slot`.
-    BadValidatorIndex,
-    /// The given `signature` did not match the `pubkey` in the given
-    /// `state.validator_registry`.
-    BadSignature,
-    /// The given `slot` does not match the validator's committee assignment.
-    BadSlot,
-    /// The given `shard` does not match the validator's committee assignment, or is not included
-    /// in a committee for the given slot.
-    BadShard,
-    /// The attestation is from the epoch prior to this one; it is ignored.
-    TooOld,
-}
-
-macro_rules! valid_outcome {
-    ($error: expr) => {
-        return Ok(Outcome {
-            valid: true,
-            message: $error,
-        });
-    };
-}
-
-macro_rules! invalid_outcome {
-    ($error: expr) => {
-        return Ok(Outcome {
-            valid: false,
-            message: $error,
-        });
-    };
-}
-
-impl AttestationAggregator {
-    /// Instantiates a new AttestationAggregator with an empty database.
-    pub fn new() -> Self {
-        Self {
-            store: HashMap::new(),
-        }
-    }
-
-    /// Accepts some `FreeAttestation`, validates it and either aggregates it upon some existing
-    /// `Attestation` or produces a new `Attestation`.
-    ///
-    /// The "validation" provided is not complete; instead, the following points are checked:
-    /// - The given `validator_index` is in the committee for the given `shard` for the given
-    ///   `slot`.
-    /// - The signature is verified against that of the validator at `validator_index`.
-    pub fn process_free_attestation(
-        &mut self,
-        state: &BeaconState,
-        free_attestation: &FreeAttestation,
-        spec: &ChainSpec,
-    ) -> Result<Outcome, BeaconStateError> {
-        let duties =
-            match state.get_attestation_duties(free_attestation.validator_index as usize, spec) {
-                Err(BeaconStateError::EpochCacheUninitialized(e)) => {
-                    panic!("Attempted to access unbuilt cache {:?}.", e)
-                }
-                Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld),
-                Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard),
-                Err(e) => return Err(e),
-                Ok(None) => invalid_outcome!(Message::BadValidatorIndex),
-                Ok(Some(attestation_duties)) => attestation_duties,
-            };
-
-        if free_attestation.data.slot != duties.slot {
-            invalid_outcome!(Message::BadSlot);
-        }
-        if free_attestation.data.shard != duties.shard {
-            invalid_outcome!(Message::BadShard);
-        }
-
-        let signable_message = AttestationDataAndCustodyBit {
-            data: free_attestation.data.clone(),
-            custody_bit: PHASE_0_CUSTODY_BIT,
-        }
-        .hash_tree_root();
-
-        let validator_record = match state
-            .validator_registry
-            .get(free_attestation.validator_index as usize)
-        {
-            None => invalid_outcome!(Message::BadValidatorIndex),
-            Some(validator_record) => validator_record,
-        };
-
-        if !free_attestation.signature.verify(
-            &signable_message,
-            spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork),
-            &validator_record.pubkey,
-        ) {
-            invalid_outcome!(Message::BadSignature);
-        }
-
-        if let Some(existing_attestation) = self.store.get(&signable_message) {
-            if let Some(updated_attestation) = aggregate_attestation(
-                existing_attestation,
-                &free_attestation.signature,
-                duties.committee_index as usize,
-            ) {
-                self.store.insert(signable_message, updated_attestation);
-                valid_outcome!(Message::Aggregated);
-            } else {
-                valid_outcome!(Message::AggregationNotRequired);
-            }
-        } else {
-            let mut aggregate_signature = AggregateSignature::new();
-            aggregate_signature.add(&free_attestation.signature);
-            let mut aggregation_bitfield = Bitfield::new();
-            aggregation_bitfield.set(duties.committee_index as usize, true);
-            let new_attestation = Attestation {
-                data: free_attestation.data.clone(),
-                aggregation_bitfield,
-                custody_bitfield: Bitfield::new(),
-                aggregate_signature,
-            };
-            self.store.insert(signable_message, new_attestation);
-            valid_outcome!(Message::NewAttestationCreated);
-        }
-    }
-
-    /// Returns all known attestations which are:
-    ///
-    /// - Valid for the given state
-    /// - Not already in `state.latest_attestations`.
-    pub fn get_attestations_for_state(
-        &self,
-        state: &BeaconState,
-        spec: &ChainSpec,
-    ) -> Vec<Attestation> {
-        let mut known_attestation_data: HashSet<AttestationData> = HashSet::new();
-
-        state
-            .previous_epoch_attestations
-            .iter()
-            .chain(state.current_epoch_attestations.iter())
-            .for_each(|attestation| {
-                known_attestation_data.insert(attestation.data.clone());
-            });
-
-        self.store
-            .values()
-            .filter_map(|attestation| {
-                if validate_attestation_without_signature(&state, attestation, spec).is_ok()
-                    && !known_attestation_data.contains(&attestation.data)
-                {
-                    Some(attestation.clone())
-                } else {
-                    None
-                }
-            })
-            .collect()
-    }
-}
-
-/// Produces a new `Attestation` where:
-///
-/// - `signature` is added to `Attestation.aggregate_signature`
-/// - `Attestation.aggregation_bitfield[committee_index]` is set to true.
-fn aggregate_attestation(
-    existing_attestation: &Attestation,
-    signature: &Signature,
-    committee_index: usize,
-) -> Option<Attestation> {
-    let already_signed = existing_attestation
-        .aggregation_bitfield
-        .get(committee_index)
-        .unwrap_or(false);
-
-    if already_signed {
-        None
-    } else {
-        let mut aggregation_bitfield = existing_attestation.aggregation_bitfield.clone();
-        aggregation_bitfield.set(committee_index, true);
-        let mut aggregate_signature = existing_attestation.aggregate_signature.clone();
-        aggregate_signature.add(&signature);
-
-        Some(Attestation {
-            aggregation_bitfield,
-            aggregate_signature,
-            ..existing_attestation.clone()
-        })
-    }
-}
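The deleted aggregator turns on one rule: a validator may contribute a signature to a given `Attestation` only once, tracked by a committee-indexed bitfield. A minimal, self-contained sketch of that rule follows, with stand-in types (plain vectors in place of `Bitfield` and the BLS aggregate); it is an illustration, not the lighthouse API.

#[derive(Clone, Debug, PartialEq)]
struct Attestation {
    aggregation_bitfield: Vec<bool>, // one bit per committee member
    signatures: Vec<u64>,            // stand-in for an aggregate BLS signature
}

/// Mirrors `aggregate_attestation`: refuse a signature whose committee bit is
/// already set; otherwise set the bit and fold the signature in.
fn aggregate(existing: &Attestation, signature: u64, committee_index: usize) -> Option<Attestation> {
    if *existing.aggregation_bitfield.get(committee_index)? {
        return None; // already signed: aggregation not required
    }
    let mut updated = existing.clone();
    updated.aggregation_bitfield[committee_index] = true;
    updated.signatures.push(signature); // a real impl adds onto one BLS point
    Some(updated)
}

fn main() {
    let base = Attestation { aggregation_bitfield: vec![false; 4], signatures: vec![] };
    let once = aggregate(&base, 42, 2).expect("first signature is aggregated");
    assert!(once.aggregation_bitfield[2]);
    assert_eq!(aggregate(&once, 42, 2), None); // a duplicate is rejected
}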


@@ -1,4 +1,3 @@
-use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
 use crate::checkpoint::CheckPoint;
 use crate::errors::{BeaconChainError as Error, BlockProductionError};
 use db::{
@@ -7,9 +6,15 @@ use db::{
 };
 use fork_choice::{ForkChoice, ForkChoiceError};
 use log::{debug, trace};
+use operation_pool::DepositInsertStatus;
+use operation_pool::OperationPool;
 use parking_lot::{RwLock, RwLockReadGuard};
 use slot_clock::SlotClock;
 use ssz::ssz_encode;
+use state_processing::per_block_processing::errors::{
+    AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
+    ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
+};
 use state_processing::{
     per_block_processing, per_block_processing_without_verifying_block_signature,
     per_slot_processing, BlockProcessingError, SlotProcessingError,
@@ -82,12 +87,7 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
     pub block_store: Arc<BeaconBlockStore<T>>,
     pub state_store: Arc<BeaconStateStore<T>>,
     pub slot_clock: U,
-    pub attestation_aggregator: RwLock<AttestationAggregator>,
-    pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
-    pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
-    pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
-    pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
-    pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
+    pub op_pool: OperationPool,
     canonical_head: RwLock<CheckPoint>,
     finalized_head: RwLock<CheckPoint>,
     pub state: RwLock<BeaconState>,
@@ -129,23 +129,14 @@ where
             genesis_state.clone(),
             state_root,
         ));

-        let attestation_aggregator = RwLock::new(AttestationAggregator::new());
-        genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)?;
-        genesis_state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)?;
+        genesis_state.build_all_caches(&spec)?;

         Ok(Self {
             block_store,
             state_store,
             slot_clock,
-            attestation_aggregator,
-            deposits_for_inclusion: RwLock::new(vec![]),
-            exits_for_inclusion: RwLock::new(vec![]),
-            transfers_for_inclusion: RwLock::new(vec![]),
-            proposer_slashings_for_inclusion: RwLock::new(vec![]),
-            attester_slashings_for_inclusion: RwLock::new(vec![]),
+            op_pool: OperationPool::new(),
             state: RwLock::new(genesis_state),
             finalized_head,
             canonical_head,
@@ -299,7 +290,7 @@ where
     /// fork-choice rule).
     ///
     /// It is important to note that the `beacon_state` returned may not match the present slot. It
-    /// is the state as it was when the head block was recieved, which could be some slots prior to
+    /// is the state as it was when the head block was received, which could be some slots prior to
     /// now.
     pub fn head(&self) -> RwLockReadGuard<CheckPoint> {
         self.canonical_head.read()
@@ -324,6 +315,8 @@ where
             per_slot_processing(&mut state, &latest_block_header, &self.spec)?;
         }

+        state.build_all_caches(&self.spec)?;
+
         *self.state.write() = state;

         Ok(())
@@ -348,11 +341,17 @@ where
             per_slot_processing(&mut *state, &latest_block_header, &self.spec)?;
         }

-        state.build_epoch_cache(RelativeEpoch::Previous, &self.spec)?;
-        state.build_epoch_cache(RelativeEpoch::Current, &self.spec)?;
-        state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &self.spec)?;
-        state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &self.spec)?;
-        state.update_pubkey_cache()?;
+        state.build_all_caches(&self.spec)?;
+
+        Ok(())
+    }
+
+    /// Build all of the caches on the current state.
+    ///
+    /// Ideally this shouldn't be required, however we leave it here for testing.
+    pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> {
+        self.state.write().build_all_caches(&self.spec)?;

         Ok(())
     }
@ -479,22 +478,37 @@ where
} }
/// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
pub fn produce_attestation(&self, shard: u64) -> Result<AttestationData, Error> { pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
trace!("BeaconChain::produce_attestation: shard: {}", shard); trace!("BeaconChain::produce_attestation: shard: {}", shard);
let source_epoch = self.state.read().current_justified_epoch; let state = self.state.read();
let source_root = *self.state.read().get_block_root(
source_epoch.start_slot(self.spec.slots_per_epoch),
&self.spec,
)?;
let target_root = *self.state.read().get_block_root( let current_epoch_start_slot = self
self.state .state
.read()
.slot
.epoch(self.spec.slots_per_epoch)
.start_slot(self.spec.slots_per_epoch);
let target_root = if state.slot == current_epoch_start_slot {
// If we're on the first slot of the state's epoch.
if self.head().beacon_block.slot == state.slot {
// If the current head block is from the current slot, use its block root.
self.head().beacon_block_root
} else {
// If the current head block is not from this slot, use the slot from the previous
// epoch.
*self.state.read().get_block_root(
current_epoch_start_slot - self.spec.slots_per_epoch,
&self.spec,
)?
}
} else {
// If we're not on the first slot of the epoch.
*self
.state
.read() .read()
.slot .get_block_root(current_epoch_start_slot, &self.spec)?
.epoch(self.spec.slots_per_epoch) };
.start_slot(self.spec.slots_per_epoch),
&self.spec,
)?;
Ok(AttestationData { Ok(AttestationData {
slot: self.state.read().slot, slot: self.state.read().slot,
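The new `target_root` branch handles the epoch boundary: on the first slot of an epoch the state does not yet hold a block root for its own slot, so the chain either uses the head block's root (when the head is at this slot) or reaches back one full epoch. A worked sketch of just the slot arithmetic, assuming `slots_per_epoch = 8` and plain integers in place of `Slot`:

// Epoch start = slot rounded down to a multiple of slots_per_epoch.
fn epoch_start_slot(slot: u64, slots_per_epoch: u64) -> u64 {
    (slot / slots_per_epoch) * slots_per_epoch
}

fn main() {
    let slots_per_epoch = 8;
    // Mid-epoch (slot 13): the target is the root stored at the epoch start, slot 8.
    assert_eq!(epoch_start_slot(13, slots_per_epoch), 8);
    // On the boundary itself (slot 16), the state has no root for slot 16 yet:
    // use the head block root if the head is at slot 16, else fall back a full
    // epoch, to slot 8.
    assert_eq!(epoch_start_slot(16, slots_per_epoch), 16);
    assert_eq!(epoch_start_slot(16, slots_per_epoch) - slots_per_epoch, 8);
}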
@@ -502,261 +516,61 @@ where
             beacon_block_root: self.head().beacon_block_root,
             target_root,
             crosslink_data_root: Hash256::zero(),
-            previous_crosslink: Crosslink {
-                epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
-                crosslink_data_root: Hash256::zero(),
-            },
-            source_epoch,
-            source_root,
+            previous_crosslink: state.latest_crosslinks[shard as usize].clone(),
+            source_epoch: state.current_justified_epoch,
+            source_root: state.current_justified_root,
         })
     }

-    /// Validate a `FreeAttestation` and either:
+    /// Accept a new attestation from the network.
     ///
-    /// - Create a new `Attestation`.
-    /// - Aggregate it to an existing `Attestation`.
-    pub fn process_free_attestation(
+    /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation
+    /// if possible.
+    pub fn process_attestation(
         &self,
-        free_attestation: FreeAttestation,
-    ) -> Result<AggregationOutcome, Error> {
-        let aggregation_outcome = self
-            .attestation_aggregator
-            .write()
-            .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?;
-
-        // return if the attestation is invalid
-        if !aggregation_outcome.valid {
-            return Ok(aggregation_outcome);
-        }
-
-        // valid attestation, proceed with fork-choice logic
-        self.fork_choice.write().add_attestation(
-            free_attestation.validator_index,
-            &free_attestation.data.beacon_block_root,
-            &self.spec,
-        )?;
-
-        Ok(aggregation_outcome)
+        attestation: Attestation,
+    ) -> Result<(), AttestationValidationError> {
+        self.op_pool
+            .insert_attestation(attestation, &*self.state.read(), &self.spec)
     }

     /// Accept some deposit and queue it for inclusion in an appropriate block.
-    pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
-        // TODO: deposits are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.deposits_for_inclusion.write().push(deposit);
-    }
-
-    /// Return a vec of deposits suitable for inclusion in some block.
-    pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
-        // TODO: deposits are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.deposits_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Deposits` are not included twice in successive blocks.
-    pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_deposits {
-            for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let deposits_for_inclusion = &mut self.deposits_for_inclusion.write();
-        for i in indices_to_delete {
-            deposits_for_inclusion.remove(i);
-        }
+    pub fn process_deposit(
+        &self,
+        deposit: Deposit,
+    ) -> Result<DepositInsertStatus, DepositValidationError> {
+        self.op_pool
+            .insert_deposit(deposit, &*self.state.read(), &self.spec)
     }

     /// Accept some exit and queue it for inclusion in an appropriate block.
-    pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) {
-        // TODO: exits are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.exits_for_inclusion.write().push(exit);
-    }
-
-    /// Return a vec of exits suitable for inclusion in some block.
-    pub fn get_exits_for_block(&self) -> Vec<VoluntaryExit> {
-        // TODO: exits are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.exits_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `VoluntaryExits` that were included in recent blocks and removes them from
-    /// the inclusion queue.
-    ///
-    /// This ensures that `VoluntaryExits` are not included twice in successive blocks.
-    pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) {
-        // TODO: method does not take forks into account; consider this.
-        let mut indices_to_delete = vec![];
-
-        for included in included_exits {
-            for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let exits_for_inclusion = &mut self.exits_for_inclusion.write();
-        for i in indices_to_delete {
-            exits_for_inclusion.remove(i);
-        }
+    pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> {
+        self.op_pool
+            .insert_voluntary_exit(exit, &*self.state.read(), &self.spec)
     }

     /// Accept some transfer and queue it for inclusion in an appropriate block.
-    pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) {
-        // TODO: transfers are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.transfers_for_inclusion.write().push(transfer);
-    }
-
-    /// Return a vec of transfers suitable for inclusion in some block.
-    pub fn get_transfers_for_block(&self) -> Vec<Transfer> {
-        // TODO: transfers are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.transfers_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `Transfers` that were included in recent blocks and removes them from the
-    /// inclusion queue.
-    ///
-    /// This ensures that `Transfers` are not included twice in successive blocks.
-    pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) {
-        // TODO: method does not take forks into account; consider this.
-        let mut indices_to_delete = vec![];
-
-        for included in included_transfers {
-            for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let transfers_for_inclusion = &mut self.transfers_for_inclusion.write();
-        for i in indices_to_delete {
-            transfers_for_inclusion.remove(i);
-        }
+    pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> {
+        self.op_pool
+            .insert_transfer(transfer, &*self.state.read(), &self.spec)
     }

     /// Accept some proposer slashing and queue it for inclusion in an appropriate block.
-    pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
-        // TODO: proposer_slashings are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.proposer_slashings_for_inclusion
-            .write()
-            .push(proposer_slashing);
-    }
-
-    /// Return a vec of proposer slashings suitable for inclusion in some block.
-    pub fn get_proposer_slashings_for_block(&self) -> Vec<ProposerSlashing> {
-        // TODO: proposer_slashings are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.proposer_slashings_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them
-    /// from the inclusion queue.
-    ///
-    /// This ensures that `ProposerSlashings` are not included twice in successive blocks.
-    pub fn set_proposer_slashings_as_included(
+    pub fn process_proposer_slashing(
         &self,
-        included_proposer_slashings: &[ProposerSlashing],
-    ) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_proposer_slashings {
-            for (i, for_inclusion) in self
-                .proposer_slashings_for_inclusion
-                .read()
-                .iter()
-                .enumerate()
-            {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write();
-        for i in indices_to_delete {
-            proposer_slashings_for_inclusion.remove(i);
-        }
+        proposer_slashing: ProposerSlashing,
+    ) -> Result<(), ProposerSlashingValidationError> {
+        self.op_pool
+            .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec)
     }

     /// Accept some attester slashing and queue it for inclusion in an appropriate block.
-    pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) {
-        // TODO: attester_slashings are not checked for validity; check them.
-        //
-        // https://github.com/sigp/lighthouse/issues/276
-        self.attester_slashings_for_inclusion
-            .write()
-            .push(attester_slashing);
-    }
-
-    /// Return a vec of attester slashings suitable for inclusion in some block.
-    pub fn get_attester_slashings_for_block(&self) -> Vec<AttesterSlashing> {
-        // TODO: attester_slashings are indiscriminately included; check them for validity.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        self.attester_slashings_for_inclusion.read().clone()
-    }
-
-    /// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them
-    /// from the inclusion queue.
-    ///
-    /// This ensures that `AttesterSlashings` are not included twice in successive blocks.
-    pub fn set_attester_slashings_as_included(
+    pub fn process_attester_slashing(
         &self,
-        included_attester_slashings: &[AttesterSlashing],
-    ) {
-        // TODO: method does not take forks into account; consider this.
-        //
-        // https://github.com/sigp/lighthouse/issues/275
-        let mut indices_to_delete = vec![];
-
-        for included in included_attester_slashings {
-            for (i, for_inclusion) in self
-                .attester_slashings_for_inclusion
-                .read()
-                .iter()
-                .enumerate()
-            {
-                if included == for_inclusion {
-                    indices_to_delete.push(i);
-                }
-            }
-        }
-
-        let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write();
-        for i in indices_to_delete {
-            attester_slashings_for_inclusion.remove(i);
-        }
-    }
-
-    /// Returns `true` if the given block root has not been processed.
-    pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
-        Ok(!self.block_store.exists(beacon_block_root)?)
+        attester_slashing: AttesterSlashing,
+    ) -> Result<(), AttesterSlashingValidationError> {
+        self.op_pool
+            .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec)
     }

     /// Accept some block and attempt to add it to block DAG.
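All five `receive_*_for_inclusion` / `get_*_for_block` / `set_*_as_included` triples collapse into validate-on-insert `op_pool` calls. A minimal sketch of the pool idea with stand-in types (the real `OperationPool` validates against the `BeaconState`, keys entries by content, and uses interior locking, which is why the chain can call it through `&self`; none of the names below are the crate's API):

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Exit { validator_index: u64, epoch: u64 }

#[derive(Default)]
struct OperationPool { exits: HashMap<u64, Exit> } // keyed by validator index

impl OperationPool {
    /// Validate-then-insert; a real pool checks the exit against the state.
    fn insert_voluntary_exit(&mut self, exit: Exit) -> Result<(), String> {
        if exit.epoch == 0 { return Err("invalid exit".into()); }
        self.exits.entry(exit.validator_index).or_insert(exit);
        Ok(())
    }
    /// Everything currently eligible for inclusion in a block.
    fn get_voluntary_exits(&self) -> Vec<Exit> {
        self.exits.values().cloned().collect()
    }
}

fn main() {
    let mut pool = OperationPool::default();
    pool.insert_voluntary_exit(Exit { validator_index: 7, epoch: 3 }).unwrap();
    pool.insert_voluntary_exit(Exit { validator_index: 7, epoch: 3 }).unwrap(); // deduped
    assert_eq!(pool.get_voluntary_exits().len(), 1);
}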
@@ -832,13 +646,6 @@ where
         self.block_store.put(&block_root, &ssz_encode(&block)[..])?;
         self.state_store.put(&state_root, &ssz_encode(&state)[..])?;

-        // Update the inclusion queues so they aren't re-submitted.
-        self.set_deposits_as_included(&block.body.deposits[..]);
-        self.set_transfers_as_included(&block.body.transfers[..]);
-        self.set_exits_as_included(&block.body.voluntary_exits[..]);
-        self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]);
-        self.set_attester_slashings_as_included(&block.body.attester_slashings[..]);
-
         // run the fork_choice add_block logic
         self.fork_choice
             .write()
@@ -874,20 +681,13 @@ where
         trace!("Finding attestations for new block...");

-        let attestations = self
-            .attestation_aggregator
-            .read()
-            .get_attestations_for_state(&state, &self.spec);
-        trace!(
-            "Inserting {} attestation(s) into new block.",
-            attestations.len()
-        );
-
         let previous_block_root = *state
             .get_block_root(state.slot - 1, &self.spec)
             .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;

+        let (proposer_slashings, attester_slashings) =
+            self.op_pool.get_slashings(&*self.state.read(), &self.spec);
+
         let mut block = BeaconBlock {
             slot: state.slot,
             previous_block_root,
@@ -900,16 +700,23 @@ where
                     deposit_root: Hash256::zero(),
                     block_hash: Hash256::zero(),
                 },
-                proposer_slashings: self.get_proposer_slashings_for_block(),
-                attester_slashings: self.get_attester_slashings_for_block(),
-                attestations,
-                deposits: self.get_deposits_for_block(),
-                voluntary_exits: self.get_exits_for_block(),
-                transfers: self.get_transfers_for_block(),
+                proposer_slashings,
+                attester_slashings,
+                attestations: self
+                    .op_pool
+                    .get_attestations(&*self.state.read(), &self.spec),
+                deposits: self.op_pool.get_deposits(&*self.state.read(), &self.spec),
+                voluntary_exits: self
+                    .op_pool
+                    .get_voluntary_exits(&*self.state.read(), &self.spec),
+                transfers: self.op_pool.get_transfers(&*self.state.read(), &self.spec),
             },
         };

-        trace!("BeaconChain::produce_block: updating state for new block.",);
+        debug!(
+            "Produced block with {} attestations, updating state.",
+            block.body.attestations.len()
+        );

         per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
@@ -951,6 +758,11 @@ where
         Ok(())
     }

+    /// Returns `true` if the given block root has not been processed.
+    pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
+        Ok(!self.block_store.exists(beacon_block_root)?)
+    }
+
     /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
     ///
     /// This could be a very expensive operation and should only be done in testing/analysis


@@ -1,4 +1,3 @@
-mod attestation_aggregator;
 mod beacon_chain;
 mod checkpoint;
 mod errors;
@@ -7,10 +6,13 @@ pub mod test_utils;
 pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock};
 pub use self::checkpoint::CheckPoint;
-pub use self::errors::BeaconChainError;
-pub use attestation_aggregator::Outcome as AggregationOutcome;
+pub use self::errors::{BeaconChainError, BlockProductionError};
 pub use db;
 pub use fork_choice;
 pub use parking_lot;
 pub use slot_clock;
+pub use state_processing::per_block_processing::errors::{
+    AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
+    ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
+};
 pub use types;


@@ -47,6 +47,9 @@ test_cases:
     states:
       - slot: 63
         num_validators: 1003
+        num_previous_epoch_attestations: 0
+        # slots_per_epoch - attestation_inclusion_delay - skip_slots
+        num_current_epoch_attestations: 57
         slashed_validators: [11, 12, 13, 14, 42]
         exited_validators: []
         exit_initiated_validators: [50]
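On the expected count of 57: the YAML comment gives the formula; with the foundation spec's 64 slots per epoch, an attestation inclusion delay of 4, and 3 skipped slots, 64 - 4 - 3 = 57. The individual constants are an inference from the comment, not stated in the diff itself.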


@@ -10,8 +10,6 @@ use log::debug;
 use rayon::prelude::*;
 use slot_clock::TestingSlotClock;
 use ssz::TreeHash;
-use std::collections::HashSet;
-use std::iter::FromIterator;
 use std::sync::Arc;
 use types::{test_utils::TestingBeaconStateBuilder, *};
@@ -137,51 +135,64 @@ impl BeaconChainHarness {
         slot
     }

-    /// Gather the `FreeAttestation`s from the validators.
-    ///
-    /// Note: validators will only produce attestations _once per slot_. So, if you call this twice
-    /// you'll only get attestations on the first run.
-    pub fn gather_free_attesations(&mut self) -> Vec<FreeAttestation> {
+    pub fn gather_attesations(&mut self) -> Vec<Attestation> {
         let present_slot = self.beacon_chain.present_slot();
+        let state = self.beacon_chain.state.read();

-        let attesting_validators = self
-            .beacon_chain
-            .state
-            .read()
-            .get_crosslink_committees_at_slot(present_slot, &self.spec)
-            .unwrap()
-            .iter()
-            .fold(vec![], |mut acc, c| {
-                acc.append(&mut c.committee.clone());
-                acc
-            });
-        let attesting_validators: HashSet<usize> =
-            HashSet::from_iter(attesting_validators.iter().cloned());
+        let mut attestations = vec![];

-        let free_attestations: Vec<FreeAttestation> = self
-            .validators
-            .par_iter_mut()
-            .enumerate()
-            .filter_map(|(i, validator)| {
-                if attesting_validators.contains(&i) {
-                    // Advance the validator slot.
-                    validator.set_slot(present_slot);
-
-                    // Prompt the validator to produce an attestation (if required).
-                    validator.produce_free_attestation().ok()
-                } else {
-                    None
-                }
-            })
-            .collect();
-
-        debug!(
-            "Gathered {} FreeAttestations for slot {}.",
-            free_attestations.len(),
-            present_slot
-        );
-
-        free_attestations
+        for committee in state
+            .get_crosslink_committees_at_slot(present_slot, &self.spec)
+            .unwrap()
+        {
+            for &validator in &committee.committee {
+                let duties = state
+                    .get_attestation_duties(validator, &self.spec)
+                    .unwrap()
+                    .expect("Attesting validators by definition have duties");
+
+                // Obtain `AttestationData` from the beacon chain.
+                let data = self
+                    .beacon_chain
+                    .produce_attestation_data(duties.shard)
+                    .unwrap();
+
+                // Produce an aggregate signature with a single signature.
+                let aggregate_signature = {
+                    let message = AttestationDataAndCustodyBit {
+                        data: data.clone(),
+                        custody_bit: false,
+                    }
+                    .hash_tree_root();
+
+                    let domain = self.spec.get_domain(
+                        state.slot.epoch(self.spec.slots_per_epoch),
+                        Domain::Attestation,
+                        &state.fork,
+                    );
+
+                    let sig =
+                        Signature::new(&message, domain, &self.validators[validator].keypair.sk);
+
+                    let mut agg_sig = AggregateSignature::new();
+                    agg_sig.add(&sig);
+
+                    agg_sig
+                };
+
+                let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
+                let custody_bitfield = Bitfield::with_capacity(duties.committee_len);
+                aggregation_bitfield.set(duties.committee_index, true);
+
+                attestations.push(Attestation {
+                    aggregation_bitfield,
+                    data,
+                    custody_bitfield,
+                    aggregate_signature,
+                })
+            }
+        }
+
+        attestations
     }

     /// Get the block from the proposer for the slot.
@@ -200,6 +211,7 @@ impl BeaconChainHarness {
         // Ensure the validators slot clock is accurate.
         self.validators[proposer].set_slot(present_slot);
+
         self.validators[proposer].produce_block().unwrap()
     }
@@ -219,20 +231,23 @@ impl BeaconChainHarness {
         };
         debug!("...block processed by BeaconChain.");

-        debug!("Producing free attestations...");
+        debug!("Producing attestations...");

         // Produce new attestations.
-        let free_attestations = self.gather_free_attesations();
+        let attestations = self.gather_attesations();

-        debug!("Processing free attestations...");
+        debug!("Processing {} attestations...", attestations.len());

-        free_attestations.par_iter().for_each(|free_attestation| {
-            self.beacon_chain
-                .process_free_attestation(free_attestation.clone())
-                .unwrap();
-        });
+        attestations
+            .par_iter()
+            .enumerate()
+            .for_each(|(i, attestation)| {
+                self.beacon_chain
+                    .process_attestation(attestation.clone())
+                    .unwrap_or_else(|_| panic!("Attestation {} invalid: {:?}", i, attestation));
+            });

-        debug!("Free attestations processed.");
+        debug!("Attestations processed.");

         block
     }
@@ -285,7 +300,7 @@ impl BeaconChainHarness {
     /// If a new `ValidatorHarness` was created, the validator should become fully operational as
     /// if the validator were created during `BeaconChainHarness` instantiation.
     pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
-        self.beacon_chain.receive_deposit_for_inclusion(deposit);
+        self.beacon_chain.process_deposit(deposit).unwrap();

         // If a keypair is present, add a new `ValidatorHarness` to the rig.
         if let Some(keypair) = keypair {
@@ -301,24 +316,26 @@ impl BeaconChainHarness {
     /// will stop receiving duties from the beacon chain and just do nothing when prompted to
     /// produce/attest.
     pub fn add_exit(&mut self, exit: VoluntaryExit) {
-        self.beacon_chain.receive_exit_for_inclusion(exit);
+        self.beacon_chain.process_voluntary_exit(exit).unwrap();
     }

     /// Submit a transfer to the `BeaconChain` for inclusion in some block.
     pub fn add_transfer(&mut self, transfer: Transfer) {
-        self.beacon_chain.receive_transfer_for_inclusion(transfer);
+        self.beacon_chain.process_transfer(transfer).unwrap();
     }

     /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
     pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
         self.beacon_chain
-            .receive_proposer_slashing_for_inclusion(proposer_slashing);
+            .process_proposer_slashing(proposer_slashing)
+            .unwrap();
     }

     /// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
     pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
         self.beacon_chain
-            .receive_attester_slashing_for_inclusion(attester_slashing);
+            .process_attester_slashing(attester_slashing)
+            .unwrap();
     }

     /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
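Each attestation the harness builds in `gather_attesations` has exactly one participant: one bit set in the aggregation bitfield and a single signature inside the "aggregate". A reduced, dependency-free model of that per-validator step (every value below stands in for the real hash-tree-root, domain, and BLS types):

fn main() {
    let committee_len = 4;
    let committee_index = 1; // this validator's position in the committee

    // Stand-ins for hash_tree_root(AttestationDataAndCustodyBit) and a domain.
    let message: u64 = 0xfeed;
    let domain: u64 = 0x0001;
    let signature = message ^ domain; // placeholder for Signature::new(...)

    // An "aggregate" of exactly one signature, as in the harness code.
    let aggregate_signature = vec![signature];

    // Only this validator's bit is set; custody bits stay empty in phase 0.
    let mut aggregation_bitfield = vec![false; committee_len];
    let custody_bitfield = vec![false; committee_len];
    aggregation_bitfield[committee_index] = true;

    assert_eq!(aggregation_bitfield, vec![false, true, false, false]);
    assert_eq!(aggregate_signature.len(), 1);
    let _ = custody_bitfield;
}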


@@ -16,6 +16,10 @@ pub struct StateCheck {
     pub slot: Slot,
     /// Checked against `beacon_state.validator_registry.len()`.
     pub num_validators: Option<usize>,
+    /// The number of pending attestations from the previous epoch that should be in the state.
+    pub num_previous_epoch_attestations: Option<usize>,
+    /// The number of pending attestations from the current epoch that should be in the state.
+    pub num_current_epoch_attestations: Option<usize>,
     /// A list of validator indices which have been penalized. Must be in ascending order.
     pub slashed_validators: Option<Vec<u64>>,
     /// A list of validator indices which have been fully exited. Must be in ascending order.
@@ -34,6 +38,8 @@ impl StateCheck {
         Self {
             slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
             num_validators: as_usize(&yaml, "num_validators"),
+            num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"),
+            num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"),
             slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
             exited_validators: as_vec_u64(&yaml, "exited_validators"),
             exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
@@ -46,6 +52,7 @@ impl StateCheck {
     /// # Panics
     ///
     /// Panics with an error message if any test fails.
+    #[allow(clippy::cyclomatic_complexity)]
     pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
         let state_epoch = state.slot.epoch(spec.slots_per_epoch);
@@ -58,6 +65,7 @@ impl StateCheck {
             "State slot is invalid."
         );

+        // Check the validator count
         if let Some(num_validators) = self.num_validators {
             assert_eq!(
                 state.validator_registry.len(),
@@ -67,6 +75,26 @@ impl StateCheck {
             info!("OK: num_validators = {}.", num_validators);
         }

+        // Check the previous epoch attestations
+        if let Some(n) = self.num_previous_epoch_attestations {
+            assert_eq!(
+                state.previous_epoch_attestations.len(),
+                n,
+                "previous epoch attestations count != expected."
+            );
+            info!("OK: num_previous_epoch_attestations = {}.", n);
+        }
+
+        // Check the current epoch attestations
+        if let Some(n) = self.num_current_epoch_attestations {
+            assert_eq!(
+                state.current_epoch_attestations.len(),
+                n,
+                "current epoch attestations count != expected."
+            );
+            info!("OK: num_current_epoch_attestations = {}.", n);
+        }
+
         // Check for slashed validators.
         if let Some(ref slashed_validators) = self.slashed_validators {
             let actually_slashed_validators: Vec<u64> = state


@@ -14,9 +14,6 @@ use slot_clock::SlotClock;
 use std::sync::Arc;
 use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};

-// mod attester;
-// mod producer;
-
 /// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
 /// blocks/attestations.
 ///
@@ -42,20 +39,15 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectBeaconNode<T, U, F> {
     pub fn last_published_block(&self) -> Option<BeaconBlock> {
         Some(self.published_blocks.read().last()?.clone())
     }
-
-    /// Get the last published attestation (if any).
-    pub fn last_published_free_attestation(&self) -> Option<FreeAttestation> {
-        Some(self.published_attestations.read().last()?.clone())
-    }
 }

 impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> {
-    fn produce_attestation(
+    fn produce_attestation_data(
         &self,
         _slot: Slot,
         shard: u64,
     ) -> Result<Option<AttestationData>, NodeError> {
-        match self.beacon_chain.produce_attestation(shard) {
+        match self.beacon_chain.produce_attestation_data(shard) {
             Ok(attestation_data) => Ok(Some(attestation_data)),
             Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))),
         }


@@ -2,8 +2,7 @@ mod direct_beacon_node;
 mod direct_duties;
 mod local_signer;

-use attester::PollOutcome as AttestationPollOutcome;
-use attester::{Attester, Error as AttestationPollError};
+use attester::Attester;
 use beacon_chain::BeaconChain;
 use block_proposer::PollOutcome as BlockPollOutcome;
 use block_proposer::{BlockProducer, Error as BlockPollError};
@@ -14,7 +13,7 @@ use fork_choice::BitwiseLMDGhost;
 use local_signer::LocalSigner;
 use slot_clock::TestingSlotClock;
 use std::sync::Arc;
-use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot};
+use types::{BeaconBlock, ChainSpec, Keypair, Slot};

 #[derive(Debug, PartialEq)]
 pub enum BlockProduceError {
@@ -22,12 +21,6 @@ pub enum BlockProduceError {
     PollError(BlockPollError),
 }

-#[derive(Debug, PartialEq)]
-pub enum AttestationProduceError {
-    DidNotProduce(AttestationPollOutcome),
-    PollError(AttestationPollError),
-}
-
 type TestingBlockProducer = BlockProducer<
     TestingSlotClock,
     DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
@@ -117,21 +110,6 @@ impl ValidatorHarness {
             .expect("Unable to obtain produced block."))
     }

-    /// Run the `poll` function on the `Attester` and produce a `FreeAttestation`.
-    ///
-    /// An error is returned if the attester refuses to attest.
-    pub fn produce_free_attestation(&mut self) -> Result<FreeAttestation, AttestationProduceError> {
-        match self.attester.poll() {
-            Ok(AttestationPollOutcome::AttestationProduced(_)) => {}
-            Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)),
-            Err(error) => return Err(AttestationProduceError::PollError(error)),
-        };
-        Ok(self
-            .beacon_node
-            .last_published_free_attestation()
-            .expect("Unable to obtain produced attestation."))
-    }
-
     /// Set the validators slot clock to the specified slot.
     ///
     /// The validators slot clock will always read this value until it is set to something else.


@@ -10,6 +10,7 @@ use std::path::PathBuf;
 use types::multiaddr::Protocol;
 use types::multiaddr::ToMultiaddr;
 use types::ChainSpec;
+use types::Multiaddr;

 /// Stores the client configuration for this Lighthouse instance.
 #[derive(Debug, Clone)]
@@ -76,7 +77,7 @@ impl ClientConfig {
         }

         // Custom listening address ipv4/ipv6
         // TODO: Handle list of addresses
-        if let Some(listen_address_str) = args.value_of("listen_address") {
+        if let Some(listen_address_str) = args.value_of("listen-address") {
             if let Ok(listen_address) = listen_address_str.parse::<IpAddr>() {
                 let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port)
                     .to_multiaddr()
@@ -88,6 +89,17 @@ impl ClientConfig {
             }
         }

+        // Custom bootnodes
+        // TODO: Handle list of addresses
+        if let Some(boot_addresses_str) = args.value_of("boot-nodes") {
+            if let Ok(boot_address) = boot_addresses_str.parse::<Multiaddr>() {
+                config.net_conf.boot_nodes.append(&mut vec![boot_address]);
+            } else {
+                error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str);
+                return Err("Invalid IP Address");
+            }
+        }
+
         /* Filesystem related arguments */

         // Custom datadir


@@ -15,21 +15,19 @@ use futures::{future::Future, Stream};
 use network::Service as NetworkService;
 use slog::{error, info, o};
 use slot_clock::SlotClock;
-use ssz::TreeHash;
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use tokio::runtime::TaskExecutor;
 use tokio::timer::Interval;
-use types::Hash256;

 /// Main beacon node client service. This provides the connection and initialisation of the clients
 /// sub-services in multiple threads.
 pub struct Client<T: ClientTypes> {
     /// Configuration for the lighthouse client.
-    config: ClientConfig,
+    _config: ClientConfig,
     /// The beacon chain for the running client.
-    beacon_chain: Arc<BeaconChain<T::DB, T::SlotClock, T::ForkChoice>>,
+    _beacon_chain: Arc<BeaconChain<T::DB, T::SlotClock, T::ForkChoice>>,
     /// Reference to the network service.
     pub network: Arc<NetworkService>,
     /// Signal to terminate the RPC server.
@@ -92,17 +90,18 @@ impl<TClientType: ClientTypes> Client<TClientType> {
             network_logger,
         )?;

-        let mut rpc_exit_signal = None;
         // spawn the RPC server
-        if config.rpc_conf.enabled {
-            rpc_exit_signal = Some(rpc::start_server(
+        let rpc_exit_signal = if config.rpc_conf.enabled {
+            Some(rpc::start_server(
                 &config.rpc_conf,
                 executor,
                 network_send,
                 beacon_chain.clone(),
                 &log,
-            ));
-        }
+            ))
+        } else {
+            None
+        };

         let (slot_timer_exit_signal, exit) = exit_future::signal();
         if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() {
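The `rpc_exit_signal` change swaps a mutable "declare `None`, maybe overwrite it" binding for a single `if`/`else` expression. A generic sketch of the pattern (`start_server` here is a placeholder, not the rpc crate's function):

fn start_server() -> &'static str { "exit-signal" }

fn main() {
    let enabled = true;

    // Before: two steps, and the binding stays mutable.
    let mut signal = None;
    if enabled { signal = Some(start_server()); }

    // After: one immutable binding whose value is the whole if/else expression.
    let signal2 = if enabled { Some(start_server()) } else { None };

    assert_eq!(signal, signal2);
}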
@@ -131,8 +130,8 @@ impl<TClientType: ClientTypes> Client<TClientType> {
         }

         Ok(Client {
-            config,
-            beacon_chain,
+            _config: config,
+            _beacon_chain: beacon_chain,
             rpc_exit_signal,
             slot_timer_exit_signal: Some(slot_timer_exit_signal),
             log,


@@ -14,7 +14,7 @@ pub fn run<T: ClientTypes>(client: &Client<T>, executor: TaskExecutor, exit: Exi
     // notification heartbeat
     let interval = Interval::new(Instant::now(), Duration::from_secs(5));

-    let log = client.log.new(o!("Service" => "Notifier"));
+    let _log = client.log.new(o!("Service" => "Notifier"));

     // TODO: Debugging only
     let counter = Arc::new(Mutex::new(0));
@@ -22,13 +22,13 @@ pub fn run<T: ClientTypes>(client: &Client<T>, executor: TaskExecutor, exit: Exi
     // build heartbeat logic here
     let heartbeat = move |_| {
-        debug!(log, "Temp heartbeat output");
+        //debug!(log, "Temp heartbeat output");

         //TODO: Remove this logic. Testing only
         let mut count = counter.lock().unwrap();
         *count += 1;

         if *count % 5 == 0 {
-            debug!(log, "Sending Message");
+            // debug!(log, "Sending Message");
             network.send_message();
         }


@@ -1,6 +1,6 @@
 use super::BLOCKS_DB_COLUMN as DB_COLUMN;
 use super::{ClientDB, DBError};
-use ssz::Decodable;
+use ssz::decode;
 use std::sync::Arc;
 use types::{BeaconBlock, Hash256, Slot};
@@ -30,7 +30,7 @@ impl<T: ClientDB> BeaconBlockStore<T> {
         match self.get(&hash)? {
             None => Ok(None),
             Some(ssz) => {
-                let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                let block = decode::<BeaconBlock>(&ssz).map_err(|_| DBError {
                     message: "Bad BeaconBlock SSZ.".to_string(),
                 })?;
                 Ok(Some(block))


@@ -1,6 +1,6 @@
 use super::STATES_DB_COLUMN as DB_COLUMN;
 use super::{ClientDB, DBError};
-use ssz::Decodable;
+use ssz::decode;
 use std::sync::Arc;
 use types::{BeaconState, Hash256};
@@ -23,7 +23,7 @@ impl<T: ClientDB> BeaconStateStore<T> {
         match self.get(&hash)? {
             None => Ok(None),
             Some(ssz) => {
-                let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                let state = decode::<BeaconState>(&ssz).map_err(|_| DBError {
                     message: "Bad State SSZ.".to_string(),
                 })?;
                 Ok(Some(state))


@@ -4,7 +4,7 @@ use self::bytes::{BufMut, BytesMut};
 use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
 use super::{ClientDB, DBError};
 use bls::PublicKey;
-use ssz::{ssz_encode, Decodable};
+use ssz::{decode, ssz_encode};
 use std::sync::Arc;

 #[derive(Debug, PartialEq)]
@@ -69,8 +69,8 @@ impl<T: ClientDB> ValidatorStore<T> {
         let val = self.db.get(DB_COLUMN, &key[..])?;
         match val {
             None => Ok(None),
-            Some(val) => match PublicKey::ssz_decode(&val, 0) {
-                Ok((key, _)) => Ok(Some(key)),
+            Some(val) => match decode::<PublicKey>(&val) {
+                Ok(key) => Ok(Some(key)),
                 Err(_) => Err(ValidatorStoreError::DecodeError),
             },
         }
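These three store changes track the same `ssz` API move: `Decodable::ssz_decode(&bytes, 0)` returned a `(value, next_index)` tuple, while the new `decode::<T>(&bytes)` returns just the value. A sketch of that wrapper shape under stand-in definitions (this is not the real `ssz` crate, whose helper may also reject trailing bytes):

use std::convert::TryInto;

#[derive(Debug)]
enum DecodeError { TooShort }

trait Decodable: Sized {
    fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError>;
}

/// Decode a whole value, discarding the `(value, next_index)` bookkeeping.
fn decode<T: Decodable>(bytes: &[u8]) -> Result<T, DecodeError> {
    T::ssz_decode(bytes, 0).map(|(value, _index)| value)
}

impl Decodable for u32 {
    fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
        let slice = bytes.get(index..index + 4).ok_or(DecodeError::TooShort)?;
        Ok((u32::from_le_bytes(slice.try_into().unwrap()), index + 4))
    }
}

fn main() {
    assert_eq!(decode::<u32>(&7u32.to_le_bytes()).unwrap(), 7);
}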


@@ -1,4 +1,3 @@
-use crate::rpc::methods::BlockRootSlot;
 use crate::rpc::{RPCEvent, RPCMessage, Rpc};
 use crate::NetworkConfig;
 use futures::prelude::*;
@@ -13,10 +12,9 @@ use libp2p::{
     tokio_io::{AsyncRead, AsyncWrite},
     NetworkBehaviour, PeerId,
 };
-use slog::{debug, o, warn};
+use slog::{debug, o, trace, warn};
 use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream};
-use ssz_derive::{Decode, Encode};
-use types::Attestation;
+use types::{Attestation, BeaconBlock};
 use types::{Topic, TopicHash};

 /// Builds the network behaviour for the libp2p Swarm.
@@ -49,7 +47,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
     fn inject_event(&mut self, event: GossipsubEvent) {
         match event {
             GossipsubEvent::Message(gs_msg) => {
-                debug!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg));
+                trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg));

                 let pubsub_message = match PubsubMessage::ssz_decode(&gs_msg.data, 0) {
                     //TODO: Punish peer on error
@@ -67,17 +65,11 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
                 self.events.push(BehaviourEvent::GossipMessage {
                     source: gs_msg.source,
                     topics: gs_msg.topics,
-                    message: pubsub_message,
+                    message: Box::new(pubsub_message),
                 });
             }
-            GossipsubEvent::Subscribed {
-                peer_id: _,
-                topic: _,
-            }
-            | GossipsubEvent::Unsubscribed {
-                peer_id: _,
-                topic: _,
-            } => {}
+            GossipsubEvent::Subscribed { .. } => {}
+            GossipsubEvent::Unsubscribed { .. } => {}
         }
     }
 }
@@ -112,7 +104,8 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<IdentifyEv
                 );
                 info.listen_addrs.truncate(20);
             }
-            self.events.push(BehaviourEvent::Identified(peer_id, info));
+            self.events
+                .push(BehaviourEvent::Identified(peer_id, Box::new(info)));
         }
         IdentifyEvent::Error { .. } => {}
         IdentifyEvent::SendBack { .. } => {}
@@ -185,12 +178,12 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 pub enum BehaviourEvent {
     RPC(PeerId, RPCEvent),
     PeerDialed(PeerId),
-    Identified(PeerId, IdentifyInfo),
+    Identified(PeerId, Box<IdentifyInfo>),
     // TODO: This is a stub at the moment
     GossipMessage {
         source: PeerId,
         topics: Vec<TopicHash>,
-        message: PubsubMessage,
+        message: Box<PubsubMessage>,
     },
 }
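Boxing `IdentifyInfo` and `PubsubMessage` shrinks `BehaviourEvent` itself, since an enum is as large as its largest variant (this is the change clippy's `large_enum_variant` lint suggests). A standalone illustration:

use std::mem::size_of;

enum Unboxed { Small(u8), Large([u8; 1024]) }
enum Boxed { Small(u8), Large(Box<[u8; 1024]>) }

fn main() {
    // The unboxed enum must reserve room for its 1 KiB variant...
    assert!(size_of::<Unboxed>() > 1024);
    // ...while the boxed one only stores a pointer plus a discriminant.
    assert!(size_of::<Boxed>() <= 16);
    let _ = (Unboxed::Small(0), Boxed::Small(0));
}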
@@ -198,7 +191,7 @@ pub enum BehaviourEvent {
 #[derive(Debug, Clone, PartialEq)]
 pub enum PubsubMessage {
     /// Gossipsub message providing notification of a new block.
-    Block(BlockRootSlot),
+    Block(BeaconBlock),
     /// Gossipsub message providing notification of a new attestation.
     Attestation(Attestation),
 }
@@ -224,7 +217,7 @@ impl Decodable for PubsubMessage {
         let (id, index) = u32::ssz_decode(bytes, index)?;
         match id {
             0 => {
-                let (block, index) = BlockRootSlot::ssz_decode(bytes, index)?;
+                let (block, index) = BeaconBlock::ssz_decode(bytes, index)?;
                 Ok((PubsubMessage::Block(block), index))
             }
             1 => {
@ -243,10 +236,7 @@ mod test {
#[test] #[test]
fn ssz_encoding() { fn ssz_encoding() {
let original = PubsubMessage::Block(BlockRootSlot { let original = PubsubMessage::Block(BeaconBlock::empty(&ChainSpec::foundation()));
block_root: Hash256::from_slice(&[42; 32]),
slot: Slot::new(4),
});
let encoded = ssz_encode(&original); let encoded = ssz_encode(&original);
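For context, a minimal, self-contained sketch of the tagged-union framing this codec implements, assuming a little-endian u32 tag followed by the variant's SSZ payload (the helper names here are hypothetical and only stand in for the project's ssz crate):

    // Hypothetical model of the PubsubMessage framing: a u32 id selects the
    // variant (0 => Block, 1 => Attestation), then the variant's payload follows.
    fn encode_tagged(id: u32, payload: &[u8]) -> Vec<u8> {
        let mut out = id.to_le_bytes().to_vec(); // stand-in for the ssz crate's u32 encoding
        out.extend_from_slice(payload);
        out
    }

    fn decode_tagged(bytes: &[u8]) -> (u32, &[u8]) {
        let id = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
        (id, &bytes[4..])
    }

    fn main() {
        let (id, payload) = decode_tagged(&encode_tagged(0, b"block-bytes"));
        assert_eq!(id, 0); // 0 selects PubsubMessage::Block
        assert_eq!(payload, b"block-bytes");
    }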

View File

@ -1,7 +1,7 @@
use ssz::{Decodable, DecodeError, Encodable, SszStream}; use ssz::{Decodable, DecodeError, Encodable, SszStream};
/// Available RPC methods types and ids. /// Available RPC methods types and ids.
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use types::{Attestation, BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot}; use types::{BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot};
#[derive(Debug)] #[derive(Debug)]
/// Available Serenity Libp2p RPC methods /// Available Serenity Libp2p RPC methods
@ -179,6 +179,19 @@ pub struct BeaconBlockRootsResponse {
pub roots: Vec<BlockRootSlot>, pub roots: Vec<BlockRootSlot>,
} }
impl BeaconBlockRootsResponse {
/// Returns `true` if each `self.roots[i].slot` is strictly higher than that of the preceding entry.
pub fn slots_are_ascending(&self) -> bool {
for i in 1..self.roots.len() {
if self.roots[i - 1].slot >= self.roots[i].slot {
return false;
}
}
true
}
}
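The ascending-slot check above is strict, so equal slots fail too; a minimal sketch with a simplified stand-in type (the real `BlockRootSlot` also carries a `block_root`):

    // Simplified stand-in for the `BlockRootSlot` used above.
    struct BlockRootSlot { slot: u64 }

    // Equivalent strict-ordering check: every slot must exceed its predecessor.
    fn slots_are_ascending(roots: &[BlockRootSlot]) -> bool {
        roots.windows(2).all(|w| w[0].slot < w[1].slot)
    }

    fn main() {
        assert!(slots_are_ascending(&[BlockRootSlot { slot: 1 }, BlockRootSlot { slot: 2 }]));
        // Duplicate slots are rejected, matching the `>=` comparison above.
        assert!(!slots_are_ascending(&[BlockRootSlot { slot: 2 }, BlockRootSlot { slot: 2 }]));
    }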
/// Contains a block root and associated slot. /// Contains a block root and associated slot.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlockRootSlot { pub struct BlockRootSlot {

View File

@ -26,7 +26,7 @@ pub struct Rpc<TSubstream> {
/// Pins the generic substream. /// Pins the generic substream.
marker: PhantomData<TSubstream>, marker: PhantomData<TSubstream>,
/// Slog logger for RPC behaviour. /// Slog logger for RPC behaviour.
log: slog::Logger, _log: slog::Logger,
} }
impl<TSubstream> Rpc<TSubstream> { impl<TSubstream> Rpc<TSubstream> {
@ -35,7 +35,7 @@ impl<TSubstream> Rpc<TSubstream> {
Rpc { Rpc {
events: Vec::new(), events: Vec::new(),
marker: PhantomData, marker: PhantomData,
log, _log: log,
} }
} }
@ -65,7 +65,7 @@ where
fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) {
// if initialised the connection, report this upwards to send the HELLO request // if initialised the connection, report this upwards to send the HELLO request
if let ConnectedPoint::Dialer { address: _ } = connected_point { if let ConnectedPoint::Dialer { .. } = connected_point {
self.events.push(NetworkBehaviourAction::GenerateEvent( self.events.push(NetworkBehaviourAction::GenerateEvent(
RPCMessage::PeerDialed(peer_id), RPCMessage::PeerDialed(peer_id),
)); ));

View File

@ -31,7 +31,7 @@ impl Default for RPCProtocol {
} }
/// A monotonic counter for ordering `RPCRequest`s. /// A monotonic counter for ordering `RPCRequest`s.
#[derive(Debug, Clone, PartialEq, Default)] #[derive(Debug, Clone, Default)]
pub struct RequestId(u64); pub struct RequestId(u64);
impl RequestId { impl RequestId {
@ -48,6 +48,12 @@ impl RequestId {
impl Eq for RequestId {} impl Eq for RequestId {}
impl PartialEq for RequestId {
fn eq(&self, other: &RequestId) -> bool {
self.0 == other.0
}
}
impl Hash for RequestId { impl Hash for RequestId {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
self.0.hash(state); self.0.hash(state);
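The manual `PartialEq` keeps equality consistent with `Hash` (both defer to the inner `u64`), which is what `HashMap` keys require; a hedged sketch of that invariant with a simplified copy of the type:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Simplified copy of the `RequestId` wrapper above.
    #[derive(Debug, Clone, Default)]
    struct RequestId(u64);

    impl PartialEq for RequestId {
        fn eq(&self, other: &RequestId) -> bool { self.0 == other.0 }
    }
    impl Eq for RequestId {}
    impl Hash for RequestId {
        fn hash<H: Hasher>(&self, state: &mut H) { self.0.hash(state) }
    }

    fn hash_of(id: &RequestId) -> u64 {
        let mut h = DefaultHasher::new();
        id.hash(&mut h);
        h.finish()
    }

    fn main() {
        // Equal ids must hash equally, or HashMap lookups would silently miss.
        assert_eq!(RequestId(7), RequestId(7));
        assert_eq!(hash_of(&RequestId(7)), hash_of(&RequestId(7)));
    }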
@ -104,17 +110,15 @@ impl UpgradeInfo for RPCEvent {
} }
} }
type FnDecodeRPCEvent = fn(Vec<u8>, ()) -> Result<RPCEvent, DecodeError>;
impl<TSocket> InboundUpgrade<TSocket> for RPCProtocol impl<TSocket> InboundUpgrade<TSocket> for RPCProtocol
where where
TSocket: AsyncRead + AsyncWrite, TSocket: AsyncRead + AsyncWrite,
{ {
type Output = RPCEvent; type Output = RPCEvent;
type Error = DecodeError; type Error = DecodeError;
type Future = upgrade::ReadOneThen< type Future = upgrade::ReadOneThen<upgrade::Negotiated<TSocket>, (), FnDecodeRPCEvent>;
upgrade::Negotiated<TSocket>,
(),
fn(Vec<u8>, ()) -> Result<RPCEvent, DecodeError>,
>;
fn upgrade_inbound(self, socket: upgrade::Negotiated<TSocket>, _: Self::Info) -> Self::Future { fn upgrade_inbound(self, socket: upgrade::Negotiated<TSocket>, _: Self::Info) -> Self::Future {
upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?)) upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?))

View File

@ -19,13 +19,16 @@ use std::io::{Error, ErrorKind};
use std::time::Duration; use std::time::Duration;
use types::{TopicBuilder, TopicHash}; use types::{TopicBuilder, TopicHash};
type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
/// The configuration and state of the libp2p components for the beacon node. /// The configuration and state of the libp2p components for the beacon node.
pub struct Service { pub struct Service {
/// The libp2p Swarm handler. /// The libp2p Swarm handler.
//TODO: Make this private //TODO: Make this private
pub swarm: Swarm<Boxed<(PeerId, StreamMuxerBox), Error>, Behaviour<Substream<StreamMuxerBox>>>, pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,
/// This node's PeerId. /// This node's PeerId.
local_peer_id: PeerId, _local_peer_id: PeerId,
/// The libp2p logger handle. /// The libp2p logger handle.
pub log: slog::Logger, pub log: slog::Logger,
} }
@ -89,7 +92,7 @@ impl Service {
info!(log, "Subscribed to topics: {:?}", subscribed_topics); info!(log, "Subscribed to topics: {:?}", subscribed_topics);
Ok(Service { Ok(Service {
local_peer_id, _local_peer_id: local_peer_id,
swarm, swarm,
log, log,
}) })
@ -113,7 +116,7 @@ impl Stream for Service {
topics, topics,
message, message,
} => { } => {
debug!(self.log, "Pubsub message received: {:?}", message); trace!(self.log, "Pubsub message received: {:?}", message);
return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage {
source, source,
topics, topics,
@ -179,11 +182,11 @@ pub enum Libp2pEvent {
/// Initiated the connection to a new peer. /// Initiated the connection to a new peer.
PeerDialed(PeerId), PeerDialed(PeerId),
/// Received information about a peer on the network. /// Received information about a peer on the network.
Identified(PeerId, IdentifyInfo), Identified(PeerId, Box<IdentifyInfo>),
/// Received pubsub message. /// Received pubsub message.
PubsubMessage { PubsubMessage {
source: PeerId, source: PeerId,
topics: Vec<TopicHash>, topics: Vec<TopicHash>,
message: PubsubMessage, message: Box<PubsubMessage>,
}, },
} }

View File

@ -13,7 +13,7 @@ beacon_chain = { path = "../beacon_chain" }
eth2-libp2p = { path = "../eth2-libp2p" } eth2-libp2p = { path = "../eth2-libp2p" }
version = { path = "../version" } version = { path = "../version" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }
slog = "2.4.1" slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
ssz = { path = "../../eth2/utils/ssz" } ssz = { path = "../../eth2/utils/ssz" }
futures = "0.1.25" futures = "0.1.25"
error-chain = "0.12.0" error-chain = "0.12.0"

View File

@ -5,12 +5,12 @@ use beacon_chain::{
parking_lot::RwLockReadGuard, parking_lot::RwLockReadGuard,
slot_clock::SlotClock, slot_clock::SlotClock,
types::{BeaconState, ChainSpec}, types::{BeaconState, ChainSpec},
AggregationOutcome, CheckPoint, AttestationValidationError, CheckPoint,
}; };
use eth2_libp2p::rpc::HelloMessage; use eth2_libp2p::rpc::HelloMessage;
use types::{Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot}; use types::{Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot};
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome}; pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome, InvalidBlock};
/// The network's API to the beacon chain. /// The network's API to the beacon chain.
pub trait BeaconChain: Send + Sync { pub trait BeaconChain: Send + Sync {
@ -40,7 +40,7 @@ pub trait BeaconChain: Send + Sync {
fn process_attestation( fn process_attestation(
&self, &self,
attestation: Attestation, attestation: Attestation,
) -> Result<AggregationOutcome, BeaconChainError>; ) -> Result<(), AttestationValidationError>;
fn get_block_roots( fn get_block_roots(
&self, &self,
@ -126,14 +126,9 @@ where
fn process_attestation( fn process_attestation(
&self, &self,
_attestation: Attestation, attestation: Attestation,
) -> Result<AggregationOutcome, BeaconChainError> { ) -> Result<(), AttestationValidationError> {
// Awaiting a proper operations pool before we can import attestations. self.process_attestation(attestation)
//
// Returning a useless error for now.
//
// https://github.com/sigp/lighthouse/issues/281
return Err(BeaconChainError::DBInconsistent("CANNOT PROCESS".into()));
} }
fn get_block_roots( fn get_block_roots(

View File

@ -41,7 +41,7 @@ pub enum HandlerMessage {
/// An RPC response/request has been received. /// An RPC response/request has been received.
RPC(PeerId, RPCEvent), RPC(PeerId, RPCEvent),
/// A gossip message has been received. /// A gossip message has been received.
PubsubMessage(PeerId, PubsubMessage), PubsubMessage(PeerId, Box<PubsubMessage>),
} }
impl MessageHandler { impl MessageHandler {
@ -93,7 +93,7 @@ impl MessageHandler {
} }
// we have received an RPC message request/response // we have received an RPC message request/response
HandlerMessage::PubsubMessage(peer_id, gossip) => { HandlerMessage::PubsubMessage(peer_id, gossip) => {
self.handle_gossip(peer_id, gossip); self.handle_gossip(peer_id, *gossip);
} }
//TODO: Handle all messages //TODO: Handle all messages
_ => {} _ => {}
@ -208,8 +208,9 @@ impl MessageHandler {
fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) {
match gossip_message { match gossip_message {
PubsubMessage::Block(message) => { PubsubMessage::Block(message) => {
self.sync let _should_foward_on =
.on_block_gossip(peer_id, message, &mut self.network_context) self.sync
.on_block_gossip(peer_id, message, &mut self.network_context);
} }
PubsubMessage::Attestation(message) => { PubsubMessage::Attestation(message) => {
self.sync self.sync

View File

@ -17,7 +17,7 @@ use types::Topic;
/// Service that handles communication between internal services and the eth2_libp2p network service. /// Service that handles communication between internal services and the eth2_libp2p network service.
pub struct Service { pub struct Service {
//libp2p_service: Arc<Mutex<LibP2PService>>, //libp2p_service: Arc<Mutex<LibP2PService>>,
libp2p_exit: oneshot::Sender<()>, _libp2p_exit: oneshot::Sender<()>,
network_send: crossbeam_channel::Sender<NetworkMessage>, network_send: crossbeam_channel::Sender<NetworkMessage>,
//message_handler: MessageHandler, //message_handler: MessageHandler,
//message_handler_send: Sender<HandlerMessage>, //message_handler_send: Sender<HandlerMessage>,
@ -54,7 +54,7 @@ impl Service {
log, log,
)?; )?;
let network_service = Service { let network_service = Service {
libp2p_exit, _libp2p_exit: libp2p_exit,
network_send: network_send.clone(), network_send: network_send.clone(),
}; };
@ -131,9 +131,7 @@ fn network_service(
); );
} }
Libp2pEvent::PubsubMessage { Libp2pEvent::PubsubMessage {
source, source, message, ..
topics: _,
message,
} => { } => {
//TODO: Decide if we need to propagate the topic upwards. (Potentially for //TODO: Decide if we need to propagate the topic upwards. (Potentially for
//attestations) //attestations)
@ -161,13 +159,13 @@ fn network_service(
libp2p_service.swarm.send_rpc(peer_id, rpc_event); libp2p_service.swarm.send_rpc(peer_id, rpc_event);
} }
OutgoingMessage::NotifierTest => { OutgoingMessage::NotifierTest => {
debug!(log, "Received message from notifier"); // debug!(log, "Received message from notifier");
} }
}; };
} }
Ok(NetworkMessage::Publish { topics, message }) => { Ok(NetworkMessage::Publish { topics, message }) => {
debug!(log, "Sending pubsub message on topics {:?}", topics); debug!(log, "Sending pubsub message on topics {:?}", topics);
libp2p_service.swarm.publish(topics, message); libp2p_service.swarm.publish(topics, *message);
} }
Err(TryRecvError::Empty) => break, Err(TryRecvError::Empty) => break,
Err(TryRecvError::Disconnected) => { Err(TryRecvError::Disconnected) => {
@ -190,7 +188,7 @@ pub enum NetworkMessage {
/// Publish a message to pubsub mechanism. /// Publish a message to pubsub mechanism.
Publish { Publish {
topics: Vec<Topic>, topics: Vec<Topic>,
message: PubsubMessage, message: Box<PubsubMessage>,
}, },
} }

View File

@ -5,7 +5,7 @@ use slog::{debug, error};
use ssz::TreeHash; use ssz::TreeHash;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256}; use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
/// Provides a queue for fully and partially built `BeaconBlock`s. /// Provides a queue for fully and partially built `BeaconBlock`s.
/// ///
@ -104,7 +104,7 @@ impl ImportQueue {
} }
/// Returns `true` if `self.chain` has not yet processed this block. /// Returns `true` if `self.chain` has not yet processed this block.
pub fn is_new_block(&self, block_root: &Hash256) -> bool { pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool {
self.chain self.chain
.is_new_block_root(&block_root) .is_new_block_root(&block_root)
.unwrap_or_else(|_| { .unwrap_or_else(|_| {
@ -113,11 +113,36 @@ impl ImportQueue {
}) })
} }
/// Returns the index of the first new root in the list of block roots. /// Adds the `block_roots` to the partials queue.
pub fn first_new_root(&mut self, roots: &[BlockRootSlot]) -> Option<usize> { ///
roots /// If a `block_root` is not in the queue and has not been processed by the chain, it is added
/// to the queue and its block root is included in the output.
pub fn enqueue_block_roots(
&mut self,
block_roots: &[BlockRootSlot],
sender: PeerId,
) -> Vec<BlockRootSlot> {
let new_roots: Vec<BlockRootSlot> = block_roots
.iter() .iter()
.position(|brs| self.is_new_block(&brs.block_root)) // Ignore any roots already processed by the chain.
.filter(|brs| self.chain_has_not_seen_block(&brs.block_root))
// Ignore any roots already stored in the queue.
.filter(|brs| !self.partials.iter().any(|p| p.block_root == brs.block_root))
.cloned()
.collect();
new_roots.iter().for_each(|brs| {
self.partials.push(PartialBeaconBlock {
slot: brs.slot,
block_root: brs.block_root,
sender: sender.clone(),
header: None,
body: None,
inserted: Instant::now(),
})
});
new_roots
} }
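A hedged model of the dedup rule above, with roots simplified to integers (the real code filters on the chain's view and on the queued partials):

    #[derive(Clone, Debug, PartialEq)]
    struct BlockRootSlot { block_root: u64, slot: u64 } // root simplified to u64

    // Keep only roots the chain hasn't processed and the queue doesn't hold yet.
    fn filter_new_roots(
        incoming: &[BlockRootSlot],
        chain_has: impl Fn(u64) -> bool,
        queued: &[u64],
    ) -> Vec<BlockRootSlot> {
        incoming
            .iter()
            .filter(|brs| !chain_has(brs.block_root))
            .filter(|brs| !queued.contains(&brs.block_root))
            .cloned()
            .collect()
    }

    fn main() {
        let incoming = [
            BlockRootSlot { block_root: 1, slot: 10 }, // already on chain
            BlockRootSlot { block_root: 2, slot: 11 }, // already queued
            BlockRootSlot { block_root: 3, slot: 12 }, // genuinely new
        ];
        let fresh = filter_new_roots(&incoming, |root| root == 1, &[2]);
        assert_eq!(fresh, vec![BlockRootSlot { block_root: 3, slot: 12 }]);
    }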
/// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for
@ -143,7 +168,7 @@ impl ImportQueue {
for header in headers { for header in headers {
let block_root = Hash256::from_slice(&header.hash_tree_root()[..]); let block_root = Hash256::from_slice(&header.hash_tree_root()[..]);
if self.is_new_block(&block_root) { if self.chain_has_not_seen_block(&block_root) {
self.insert_header(block_root, header, sender.clone()); self.insert_header(block_root, header, sender.clone());
required_bodies.push(block_root) required_bodies.push(block_root)
} }
@ -161,6 +186,12 @@ impl ImportQueue {
} }
} }
pub fn enqueue_full_blocks(&mut self, blocks: Vec<BeaconBlock>, sender: PeerId) {
for block in blocks {
self.insert_full_block(block, sender.clone());
}
}
/// Inserts a header to the queue. /// Inserts a header to the queue.
/// ///
/// If the header already exists, the `inserted` time is set to `now` and not other /// If the header already exists, the `inserted` time is set to `now` and not other
@ -171,11 +202,21 @@ impl ImportQueue {
.iter() .iter()
.position(|p| p.block_root == block_root) .position(|p| p.block_root == block_root)
{ {
// Case 1: there already exists a partial with a matching block root.
//
// The `inserted` time is set to now and the header is replaced, whether or not
// one already existed.
self.partials[i].header = Some(header);
self.partials[i].inserted = Instant::now(); self.partials[i].inserted = Instant::now();
} else { } else {
// Case 2: there was no partial with a matching block root.
//
// A new partial is added. This case permits adding a header without already knowing the
// root; this is not possible in the wire protocol, but we support it anyway.
self.partials.push(PartialBeaconBlock { self.partials.push(PartialBeaconBlock {
slot: header.slot,
block_root, block_root,
header, header: Some(header),
body: None, body: None,
inserted: Instant::now(), inserted: Instant::now(),
sender, sender,
@ -192,25 +233,54 @@ impl ImportQueue {
let body_root = Hash256::from_slice(&body.hash_tree_root()[..]); let body_root = Hash256::from_slice(&body.hash_tree_root()[..]);
self.partials.iter_mut().for_each(|mut p| { self.partials.iter_mut().for_each(|mut p| {
if body_root == p.header.block_body_root { if let Some(header) = &mut p.header {
p.inserted = Instant::now(); if body_root == header.block_body_root {
p.inserted = Instant::now();
if p.body.is_none() { if p.body.is_none() {
p.body = Some(body.clone()); p.body = Some(body.clone());
p.sender = sender.clone(); p.sender = sender.clone();
}
} }
} }
}); });
} }
/// Updates an existing `partial` with the completed block, or adds a new (complete) partial.
///
/// If the partial already existed, the `inserted` time is set to `now`.
fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) {
let block_root = Hash256::from_slice(&block.hash_tree_root()[..]);
let partial = PartialBeaconBlock {
slot: block.slot,
block_root,
header: Some(block.block_header()),
body: Some(block.body),
inserted: Instant::now(),
sender,
};
if let Some(i) = self
.partials
.iter()
.position(|p| p.block_root == block_root)
{
self.partials[i] = partial;
} else {
self.partials.push(partial)
}
}
} }
/// Individual components of a `BeaconBlock`, potentially all that are required to form a full /// Individual components of a `BeaconBlock`, potentially all that are required to form a full
/// `BeaconBlock`. /// `BeaconBlock`.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct PartialBeaconBlock { pub struct PartialBeaconBlock {
pub slot: Slot,
/// `BeaconBlock` root. /// `BeaconBlock` root.
pub block_root: Hash256, pub block_root: Hash256,
pub header: BeaconBlockHeader, pub header: Option<BeaconBlockHeader>,
pub body: Option<BeaconBlockBody>, pub body: Option<BeaconBlockBody>,
/// The instant at which this record was created or last meaningfully modified. Used to /// The instant at which this record was created or last meaningfully modified. Used to
/// determine if an entry is stale and should be removed. /// determine if an entry is stale and should be removed.
@ -225,7 +295,7 @@ impl PartialBeaconBlock {
pub fn complete(self) -> Option<(Hash256, BeaconBlock, PeerId)> { pub fn complete(self) -> Option<(Hash256, BeaconBlock, PeerId)> {
Some(( Some((
self.block_root, self.block_root,
self.header.into_block(self.body?), self.header?.into_block(self.body?),
self.sender, self.sender,
)) ))
} }
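A hedged sketch of how the two `Option` fields gate completion via the `?` operator (types simplified; the real method also rebuilds the `BeaconBlock` from header and body):

    // A partial only completes once both the header and the body are present.
    struct Partial { header: Option<String>, body: Option<String> }

    impl Partial {
        fn complete(self) -> Option<(String, String)> {
            // `?` on an Option short-circuits the whole method to None.
            Some((self.header?, self.body?))
        }
    }

    fn main() {
        let header_only = Partial { header: Some("h".into()), body: None };
        let full = Partial { header: Some("h".into()), body: Some("b".into()) };
        assert!(header_only.complete().is_none());
        assert!(full.complete().is_some());
    }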

View File

@ -1,20 +1,25 @@
use super::import_queue::ImportQueue; use super::import_queue::ImportQueue;
use crate::beacon_chain::BeaconChain; use crate::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock};
use crate::message_handler::NetworkContext; use crate::message_handler::NetworkContext;
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
use slog::{debug, error, info, o, warn}; use slog::{debug, error, info, o, warn};
use ssz::TreeHash;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use types::{Attestation, Epoch, Hash256, Slot}; use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot};
/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. /// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
const SLOT_IMPORT_TOLERANCE: u64 = 100; const SLOT_IMPORT_TOLERANCE: u64 = 100;
/// The number of seconds a block (or partial block) may remain in the import queue. /// The number of seconds a block (or partial block) may remain in the import queue.
const QUEUE_STALE_SECS: u64 = 60; const QUEUE_STALE_SECS: u64 = 600;
/// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it.
/// Otherwise we queue it.
const FUTURE_SLOT_TOLERANCE: u64 = 1;
/// Keeps track of syncing information for known connected peers. /// Keeps track of syncing information for known connected peers.
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
@ -60,7 +65,7 @@ pub enum PeerStatus {
} }
impl PeerStatus { impl PeerStatus {
pub fn should_handshake(&self) -> bool { pub fn should_handshake(self) -> bool {
match self { match self {
PeerStatus::DifferentNetworkId => false, PeerStatus::DifferentNetworkId => false,
PeerStatus::FinalizedEpochNotInChain => false, PeerStatus::FinalizedEpochNotInChain => false,
@ -358,31 +363,50 @@ impl SimpleSync {
if res.roots.is_empty() { if res.roots.is_empty() {
warn!( warn!(
self.log, self.log,
"Peer returned empty block roots response. PeerId: {:?}", peer_id "Peer returned empty block roots response";
"peer_id" => format!("{:?}", peer_id)
); );
return; return;
} }
let new_root_index = self.import_queue.first_new_root(&res.roots); // The wire protocol specifies that slots must be in ascending order.
if !res.slots_are_ascending() {
// If a new block root is found, request it and all the headers following it. warn!(
// self.log,
// We make an assumption here that if we don't know a block then we don't know of all "Peer returned block roots response with bad slot ordering";
// its parents. This might not be the case if syncing becomes more sophisticated. "peer_id" => format!("{:?}", peer_id)
if let Some(i) = new_root_index { );
let new = &res.roots[i]; return;
self.request_block_headers(
peer_id,
BeaconBlockHeadersRequest {
start_root: new.block_root,
start_slot: new.slot,
max_headers: (res.roots.len() - i) as u64,
skip_slots: 0,
},
network,
)
} }
let new_roots = self
.import_queue
.enqueue_block_roots(&res.roots, peer_id.clone());
// No new roots means nothing to do.
//
// This check guards the `expect()` calls below from panicking.
if new_roots.is_empty() {
return;
}
// Determine the first (earliest) and last (latest) `BlockRootSlot` items.
//
// This logic relies upon the slots being in ascending order, which is enforced earlier.
let first = new_roots.first().expect("Non-empty list must have first");
let last = new_roots.last().expect("Non-empty list must have last");
// Request all headers between the earliest and latest new `BlockRootSlot` items.
self.request_block_headers(
peer_id,
BeaconBlockHeadersRequest {
start_root: first.block_root,
start_slot: first.slot,
max_headers: (last.slot - first.slot + 1).as_u64(),
skip_slots: 0,
},
network,
)
} }
/// Handle a `BeaconBlockHeaders` request from the peer. /// Handle a `BeaconBlockHeaders` request from the peer.
@ -517,34 +541,148 @@ impl SimpleSync {
} }
/// Process a gossip message declaring a new block. /// Process a gossip message declaring a new block.
///
/// Returns a `bool` which, if `true`, indicates we should forward the block to our peers.
pub fn on_block_gossip( pub fn on_block_gossip(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
msg: BlockRootSlot, block: BeaconBlock,
network: &mut NetworkContext, network: &mut NetworkContext,
) { ) -> bool {
info!( info!(
self.log, self.log,
"NewGossipBlock"; "NewGossipBlock";
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
); );
// TODO: filter out messages that are prior to the finalized slot.
// // Ignore any block from a finalized slot.
// TODO: if the block is a few more slots ahead, try to get all block roots from then until if self.slot_is_finalized(block.slot) {
// now. warn!(
// self.log, "NewGossipBlock";
// Note: only requests the new block -- will fail if we don't have its parents. "msg" => "new block slot is finalized.",
if self.import_queue.is_new_block(&msg.block_root) { "block_slot" => block.slot,
self.request_block_headers( );
peer_id, return false;
BeaconBlockHeadersRequest { }
start_root: msg.block_root,
start_slot: msg.slot, let block_root = Hash256::from_slice(&block.hash_tree_root());
max_headers: 1,
skip_slots: 0, // Ignore any block that the chain already knows about.
}, if self.chain_has_seen_block(&block_root) {
network, println!("this happened");
) // TODO: Age confirm that we shouldn't forward a block if we already know of it.
return false;
}
debug!(
self.log,
"NewGossipBlock";
"peer" => format!("{:?}", peer_id),
"msg" => "processing block",
);
match self.chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::ParentUnknown)) => {
// The block could not be processed because its parent is unknown.
debug!(
self.log, "NewGossipBlock";
"msg" => "parent block unknown",
"parent_root" => format!("{}", block.previous_block_root),
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue
.enqueue_full_blocks(vec![block], peer_id.clone());
// Send a hello to learn of the client's best slot so we can then sync the required
// parent(s).
network.send_rpc_request(
peer_id.clone(),
RPCRequest::Hello(self.chain.hello_message()),
);
// Forward the block onto our peers.
//
// Note: this may need to be changed if we decide to only forward blocks if we have
// all required info.
true
}
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::FutureSlot {
present_slot,
block_slot,
})) => {
if block_slot - present_slot > FUTURE_SLOT_TOLERANCE {
// The block is too far in the future, drop it.
warn!(
self.log, "NewGossipBlock";
"msg" => "future block rejected",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Do not forward the block around to peers.
false
} else {
// The block is in the future, but not too far.
warn!(
self.log, "NewGossipBlock";
"msg" => "queuing future block",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue.enqueue_full_blocks(vec![block], peer_id);
// Forward the block around to peers.
true
}
}
Ok(outcome) => {
if outcome.is_invalid() {
// The peer has sent a block which is fundamentally invalid.
warn!(
self.log, "NewGossipBlock";
"msg" => "invalid block from peer",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", peer_id),
);
// Disconnect the peer
network.disconnect(peer_id, GoodbyeReason::Fault);
// Do not forward the block to peers.
false
} else if outcome.sucessfully_processed() {
// The block was valid and we processed it successfully.
info!(
self.log, "NewGossipBlock";
"msg" => "block import successful",
"peer" => format!("{:?}", peer_id),
);
// Forward the block to peers
true
} else {
// The block wasn't necessarily invalid but we didn't process it successfully.
// This condition shouldn't be reached.
error!(
self.log, "NewGossipBlock";
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", outcome),
);
// Do not forward the block on.
false
}
}
Err(e) => {
// We encountered an error whilst processing the block.
//
// Blocks should not be able to trigger errors; instead, they should be flagged as
// invalid.
error!(
self.log, "NewGossipBlock";
"msg" => "internal error in processing block.",
"error" => format!("{:?}", e),
);
// Do not forward the block to peers.
false
}
} }
} }
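A hedged distillation of the future-slot branch above: blocks at most `FUTURE_SLOT_TOLERANCE` slots ahead are queued and forwarded, anything further ahead is dropped:

    const FUTURE_SLOT_TOLERANCE: u64 = 1;

    // Returns true when the block should be queued (and forwarded) rather than
    // dropped. Assumes block_slot >= present_slot, as in the FutureSlot branch.
    fn queue_future_block(present_slot: u64, block_slot: u64) -> bool {
        block_slot - present_slot <= FUTURE_SLOT_TOLERANCE
    }

    fn main() {
        assert!(queue_future_block(10, 11));  // one slot ahead: queue and forward
        assert!(!queue_future_block(10, 12)); // too far ahead: drop
    }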
@ -563,12 +701,9 @@ impl SimpleSync {
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
); );
// Awaiting a proper operations pool before we can import attestations.
//
// https://github.com/sigp/lighthouse/issues/281
match self.chain.process_attestation(msg) { match self.chain.process_attestation(msg) {
Ok(_) => panic!("Impossible, method not implemented."), Ok(()) => info!(self.log, "ImportedAttestation"),
Err(_) => error!(self.log, "Attestation processing not implemented!"), Err(e) => warn!(self.log, "InvalidAttestation"; "error" => format!("{:?}", e)),
} }
} }
@ -594,12 +729,21 @@ impl SimpleSync {
"reason" => format!("{:?}", outcome), "reason" => format!("{:?}", outcome),
); );
network.disconnect(sender, GoodbyeReason::Fault); network.disconnect(sender, GoodbyeReason::Fault);
break;
} }
// If this evaluates to true, the item will be removed from the queue. // If this evaluates to true, the item will be removed from the queue.
if outcome.sucessfully_processed() { if outcome.sucessfully_processed() {
successful += 1; successful += 1;
self.import_queue.remove(block_root); self.import_queue.remove(block_root);
} else {
debug!(
self.log,
"ProcessImportQueue";
"msg" => "Block not imported",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", sender),
);
} }
} }
Err(e) => { Err(e) => {
@ -678,6 +822,26 @@ impl SimpleSync {
network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req)); network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req));
} }
/// Returns `true` if `self.chain` has already processed this block.
pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool {
!self
.chain
.is_new_block_root(&block_root)
.unwrap_or_else(|_| {
error!(self.log, "Unable to determine if block is new.");
false
})
}
/// Returns `true` if the given slot is finalized in our chain.
fn slot_is_finalized(&self, slot: Slot) -> bool {
slot <= self
.chain
.hello_message()
.latest_finalized_epoch
.start_slot(self.chain.get_spec().slots_per_epoch)
}
/// Generates our current state in the form of a HELLO RPC message. /// Generates our current state in the form of a HELLO RPC message.
pub fn generate_hello(&self) -> HelloMessage { pub fn generate_hello(&self) -> HelloMessage {
self.chain.hello_message() self.chain.hello_message()

View File

@ -543,7 +543,7 @@ fn sync_two_nodes() {
// A provides block bodies to B. // A provides block bodies to B.
node_a.tee_block_body_response(&node_b); node_a.tee_block_body_response(&node_b);
std::thread::sleep(Duration::from_secs(10)); std::thread::sleep(Duration::from_secs(20));
node_b.harness.run_fork_choice(); node_b.harness.run_fork_choice();

View File

@ -0,0 +1,157 @@
use crate::beacon_chain::BeaconChain;
use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use protos::services::{
AttestationData as AttestationDataProto, ProduceAttestationDataRequest,
ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse,
};
use protos::services_grpc::AttestationService;
use slog::{error, info, trace, warn};
use ssz::{ssz_encode, Decodable};
use std::sync::Arc;
use types::Attestation;
#[derive(Clone)]
pub struct AttestationServiceInstance {
pub chain: Arc<BeaconChain>,
pub log: slog::Logger,
}
impl AttestationService for AttestationServiceInstance {
/// Produce the `AttestationData` for signing by a validator.
fn produce_attestation_data(
&mut self,
ctx: RpcContext,
req: ProduceAttestationDataRequest,
sink: UnarySink<ProduceAttestationDataResponse>,
) {
trace!(
&self.log,
"Attempting to produce attestation at slot {}",
req.get_slot()
);
// verify the slot, drop lock on state afterwards
{
let slot_requested = req.get_slot();
let state = self.chain.get_state();
// Start by performing some checks.
// Check that the AttestationData is for the current slot (otherwise it will not be valid).
if slot_requested > state.slot.as_u64() {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::OutOfRange,
Some(
"AttestationData request for a slot that is in the future.".to_string(),
),
))
.map_err(move |e| {
error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e)
});
return ctx.spawn(f);
}
// currently cannot handle past slots. TODO: Handle this case
else if slot_requested < state.slot.as_u64() {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("AttestationData request for a slot that is in the past.".to_string()),
))
.map_err(move |e| {
error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e)
});
return ctx.spawn(f);
}
}
// Then get the AttestationData from the beacon chain
let shard = req.get_shard();
let attestation_data = match self.chain.produce_attestation_data(shard) {
Ok(v) => v,
Err(e) => {
// Could not produce an attestation
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce an attestation: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let mut attestation_data_proto = AttestationDataProto::new();
attestation_data_proto.set_ssz(ssz_encode(&attestation_data));
let mut resp = ProduceAttestationDataResponse::new();
resp.set_attestation_data(attestation_data_proto);
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "Failed to reply with success {:?}: {:?}", req, e));
ctx.spawn(f)
}
/// Accept a fully-formed `Attestation` from the validator,
/// verify it, and pass it to the beacon chain for processing.
fn publish_attestation(
&mut self,
ctx: RpcContext,
req: PublishAttestationRequest,
sink: UnarySink<PublishAttestationResponse>,
) {
trace!(self.log, "Publishing attestation");
let mut resp = PublishAttestationResponse::new();
let ssz_serialized_attestation = req.get_attestation().get_ssz();
let attestation = match Attestation::ssz_decode(ssz_serialized_attestation, 0) {
Ok((v, _index)) => v,
Err(_) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid attestation".to_string()),
))
.map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f);
}
};
match self.chain.process_attestation(attestation) {
Ok(_) => {
// Attestation was successfully processed.
info!(
self.log,
"PublishAttestation";
"type" => "valid_attestation",
);
resp.set_success(true);
}
Err(e) => {
// Attestation was invalid
warn!(
self.log,
"PublishAttestation";
"type" => "invalid_attestation",
"error" => format!("{:?}", e),
);
resp.set_success(false);
resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec());
}
};
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}

View File

@ -1,61 +0,0 @@
use futures::Future;
use grpcio::{RpcContext, UnarySink};
use protos::services::{
Attestation as AttestationProto, ProduceAttestation, ProduceAttestationResponse,
ProduceAttestationRequest, PublishAttestationResponse, PublishAttestationRequest,
PublishAttestation
};
use protos::services_grpc::BeaconBlockService;
use slog::Logger;
#[derive(Clone)]
pub struct AttestationServiceInstance {
pub log: Logger,
}
impl AttestationService for AttestationServiceInstance {
/// Produce a `BeaconBlock` for signing by a validator.
fn produce_attestation(
&mut self,
ctx: RpcContext,
req: ProduceAttestationRequest,
sink: UnarySink<ProduceAttestationResponse>,
) {
println!("producing attestation at slot {}", req.get_slot());
// TODO: build a legit block.
let mut attestation = AttestationProto::new();
attestation.set_slot(req.get_slot());
// TODO Set the shard to something legit.
attestation.set_shard(0);
attestation.set_block_root(b"cats".to_vec());
let mut resp = ProduceAttestationResponse::new();
resp.set_attestation_data(attestation);
let f = sink
.success(resp)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
/// Accept some fully-formed `BeaconBlock`, process and publish it.
fn publish_attestation(
&mut self,
ctx: RpcContext,
req: PublishAttestationRequest,
sink: UnarySink<PublishAttestationResponse>,
) {
println!("publishing attestation {:?}", req.get_block());
// TODO: actually process the block.
let mut resp = PublishAttestationResponse::new();
resp.set_success(true);
let f = sink
.success(resp)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}

View File

@ -1,9 +1,8 @@
use crate::beacon_chain::BeaconChain; use crate::beacon_chain::BeaconChain;
use crossbeam_channel; use crossbeam_channel;
use eth2_libp2p::rpc::methods::BlockRootSlot;
use eth2_libp2p::PubsubMessage; use eth2_libp2p::PubsubMessage;
use futures::Future; use futures::Future;
use grpcio::{RpcContext, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage; use network::NetworkMessage;
use protos::services::{ use protos::services::{
BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse, BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse,
@ -11,10 +10,10 @@ use protos::services::{
}; };
use protos::services_grpc::BeaconBlockService; use protos::services_grpc::BeaconBlockService;
use slog::Logger; use slog::Logger;
use slog::{debug, error, info, warn}; use slog::{error, info, trace, warn};
use ssz::{Decodable, TreeHash}; use ssz::{ssz_encode, Decodable};
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, Hash256, Slot}; use types::{BeaconBlock, Signature, Slot};
#[derive(Clone)] #[derive(Clone)]
pub struct BeaconBlockServiceInstance { pub struct BeaconBlockServiceInstance {
@ -31,11 +30,44 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
req: ProduceBeaconBlockRequest, req: ProduceBeaconBlockRequest,
sink: UnarySink<ProduceBeaconBlockResponse>, sink: UnarySink<ProduceBeaconBlockResponse>,
) { ) {
println!("producing at slot {}", req.get_slot()); trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req));
// decode the request
// TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336
let _requested_slot = Slot::from(req.get_slot());
let randao_reveal = match Signature::ssz_decode(req.get_randao_reveal(), 0) {
Ok((reveal, _index)) => reveal,
Err(_) => {
// decode error, incorrect signature
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid randao reveal signature".to_string()),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let produced_block = match self.chain.produce_block(randao_reveal) {
Ok((block, _state)) => block,
Err(e) => {
// could not produce a block
let log_clone = self.log.clone();
warn!(self.log, "RPC Error"; "Error" => format!("Could not produce a block:{:?}",e));
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce a block: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// TODO: build a legit block.
let mut block = BeaconBlockProto::new(); let mut block = BeaconBlockProto::new();
block.set_ssz(b"cats".to_vec()); block.set_ssz(ssz_encode(&produced_block));
let mut resp = ProduceBeaconBlockResponse::new(); let mut resp = ProduceBeaconBlockResponse::new();
resp.set_block(block); resp.set_block(block);
@ -53,14 +85,14 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
req: PublishBeaconBlockRequest, req: PublishBeaconBlockRequest,
sink: UnarySink<PublishBeaconBlockResponse>, sink: UnarySink<PublishBeaconBlockResponse>,
) { ) {
trace!(&self.log, "Attempting to publish a block");
let mut resp = PublishBeaconBlockResponse::new(); let mut resp = PublishBeaconBlockResponse::new();
let ssz_serialized_block = req.get_block().get_ssz(); let ssz_serialized_block = req.get_block().get_ssz();
match BeaconBlock::ssz_decode(ssz_serialized_block, 0) { match BeaconBlock::ssz_decode(ssz_serialized_block, 0) {
Ok((block, _i)) => { Ok((block, _i)) => {
let block_root = Hash256::from_slice(&block.hash_tree_root()[..]);
match self.chain.process_block(block.clone()) { match self.chain.process_block(block.clone()) {
Ok(outcome) => { Ok(outcome) => {
if outcome.sucessfully_processed() { if outcome.sucessfully_processed() {
@ -76,16 +108,22 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
// TODO: Obtain topics from the network service properly. // TODO: Obtain topics from the network service properly.
let topic = let topic =
types::TopicBuilder::new("beacon_chain".to_string()).build(); types::TopicBuilder::new("beacon_chain".to_string()).build();
let message = PubsubMessage::Block(BlockRootSlot { let message = PubsubMessage::Block(block);
block_root,
slot: block.slot,
});
println!("Sending beacon block to gossipsub"); // Publish the block to the p2p network via gossipsub.
self.network_chan.send(NetworkMessage::Publish { self.network_chan
topics: vec![topic], .send(NetworkMessage::Publish {
message, topics: vec![topic],
}); message: Box::new(message),
})
.unwrap_or_else(|e| {
error!(
self.log,
"PublishBeaconBlock";
"type" => "failed to publish to gossipsub",
"error" => format!("{:?}", e)
);
});
resp.set_success(true); resp.set_success(true);
} else if outcome.is_invalid() { } else if outcome.is_invalid() {

View File

@ -2,12 +2,13 @@ use beacon_chain::BeaconChain as RawBeaconChain;
use beacon_chain::{ use beacon_chain::{
db::ClientDB, db::ClientDB,
fork_choice::ForkChoice, fork_choice::ForkChoice,
parking_lot::RwLockReadGuard, parking_lot::{RwLockReadGuard, RwLockWriteGuard},
slot_clock::SlotClock, slot_clock::SlotClock,
types::{BeaconState, ChainSpec}, types::{BeaconState, ChainSpec, Signature},
AttestationValidationError, BlockProductionError,
}; };
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome}; pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome};
use types::BeaconBlock; use types::{Attestation, AttestationData, BeaconBlock};
/// The RPC's API to the beacon chain. /// The RPC's API to the beacon chain.
pub trait BeaconChain: Send + Sync { pub trait BeaconChain: Send + Sync {
@ -15,8 +16,22 @@ pub trait BeaconChain: Send + Sync {
fn get_state(&self) -> RwLockReadGuard<BeaconState>; fn get_state(&self) -> RwLockReadGuard<BeaconState>;
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState>;
fn process_block(&self, block: BeaconBlock) fn process_block(&self, block: BeaconBlock)
-> Result<BlockProcessingOutcome, BeaconChainError>; -> Result<BlockProcessingOutcome, BeaconChainError>;
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError>;
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError>;
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError>;
} }
impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F> impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F>
@ -33,10 +48,32 @@ where
self.state.read() self.state.read()
} }
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState> {
self.state.write()
}
fn process_block( fn process_block(
&self, &self,
block: BeaconBlock, block: BeaconBlock,
) -> Result<BlockProcessingOutcome, BeaconChainError> { ) -> Result<BlockProcessingOutcome, BeaconChainError> {
self.process_block(block) self.process_block(block)
} }
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
self.produce_block(randao_reveal)
}
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError> {
self.produce_attestation_data(shard)
}
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.process_attestation(attestation)
}
} }

View File

@ -24,7 +24,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance {
// get the chain state // get the chain state
let state = self.chain.get_state(); let state = self.chain.get_state();
let state_fork = state.fork.clone(); let state_fork = state.fork.clone();
let genesis_time = state.genesis_time.clone(); let genesis_time = state.genesis_time;
// build the rpc fork struct // build the rpc fork struct
let mut fork = Fork::new(); let mut fork = Fork::new();
@ -35,7 +35,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance {
node_info.set_fork(fork); node_info.set_fork(fork);
node_info.set_genesis_time(genesis_time); node_info.set_genesis_time(genesis_time);
node_info.set_genesis_slot(self.chain.get_spec().genesis_slot.as_u64()); node_info.set_genesis_slot(self.chain.get_spec().genesis_slot.as_u64());
node_info.set_chain_id(self.chain.get_spec().chain_id as u32); node_info.set_chain_id(u32::from(self.chain.get_spec().chain_id));
// send the node_info the requester // send the node_info the requester
let error_log = self.log.clone(); let error_log = self.log.clone();

View File

@ -1,19 +1,22 @@
mod attestation;
mod beacon_block; mod beacon_block;
pub mod beacon_chain; pub mod beacon_chain;
mod beacon_node; mod beacon_node;
pub mod config; pub mod config;
mod validator; mod validator;
use self::attestation::AttestationServiceInstance;
use self::beacon_block::BeaconBlockServiceInstance; use self::beacon_block::BeaconBlockServiceInstance;
use self::beacon_chain::BeaconChain; use self::beacon_chain::BeaconChain;
use self::beacon_node::BeaconNodeServiceInstance; use self::beacon_node::BeaconNodeServiceInstance;
use self::validator::ValidatorServiceInstance; use self::validator::ValidatorServiceInstance;
pub use config::Config as RPCConfig; pub use config::Config as RPCConfig;
use futures::{future, Future}; use futures::Future;
use grpcio::{Environment, Server, ServerBuilder}; use grpcio::{Environment, ServerBuilder};
use network::NetworkMessage; use network::NetworkMessage;
use protos::services_grpc::{ use protos::services_grpc::{
create_beacon_block_service, create_beacon_node_service, create_validator_service, create_attestation_service, create_beacon_block_service, create_beacon_node_service,
create_validator_service,
}; };
use slog::{info, o, warn}; use slog::{info, o, warn};
use std::sync::Arc; use std::sync::Arc;
@ -56,11 +59,19 @@ pub fn start_server(
}; };
create_validator_service(instance) create_validator_service(instance)
}; };
let attestation_service = {
let instance = AttestationServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_attestation_service(instance)
};
let mut server = ServerBuilder::new(env) let mut server = ServerBuilder::new(env)
.register_service(beacon_block_service) .register_service(beacon_block_service)
.register_service(validator_service) .register_service(validator_service)
.register_service(beacon_node_service) .register_service(beacon_node_service)
.register_service(attestation_service)
.bind(config.listen_address.to_string(), config.port) .bind(config.listen_address.to_string(), config.port)
.build() .build()
.unwrap(); .unwrap();

View File

@ -4,15 +4,15 @@ use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use protos::services::{ActiveValidator, GetDutiesRequest, GetDutiesResponse, ValidatorDuty}; use protos::services::{ActiveValidator, GetDutiesRequest, GetDutiesResponse, ValidatorDuty};
use protos::services_grpc::ValidatorService; use protos::services_grpc::ValidatorService;
use slog::{debug, info, warn, Logger}; use slog::{trace, warn};
use ssz::Decodable; use ssz::decode;
use std::sync::Arc; use std::sync::Arc;
use types::{Epoch, RelativeEpoch}; use types::{Epoch, RelativeEpoch};
#[derive(Clone)] #[derive(Clone)]
pub struct ValidatorServiceInstance { pub struct ValidatorServiceInstance {
pub chain: Arc<BeaconChain>, pub chain: Arc<BeaconChain>,
pub log: Logger, pub log: slog::Logger,
} }
//TODO: Refactor Errors //TODO: Refactor Errors
@ -27,14 +27,13 @@ impl ValidatorService for ValidatorServiceInstance {
sink: UnarySink<GetDutiesResponse>, sink: UnarySink<GetDutiesResponse>,
) { ) {
let validators = req.get_validators(); let validators = req.get_validators();
debug!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch());
let epoch = Epoch::from(req.get_epoch());
let mut resp = GetDutiesResponse::new();
let resp_validators = resp.mut_active_validators();
let spec = self.chain.get_spec(); let spec = self.chain.get_spec();
let state = self.chain.get_state(); let state = self.chain.get_state();
let epoch = Epoch::from(req.get_epoch());
let mut resp = GetDutiesResponse::new();
let resp_validators = resp.mut_active_validators();
let relative_epoch = let relative_epoch =
match RelativeEpoch::from_epoch(state.slot.epoch(spec.slots_per_epoch), epoch) { match RelativeEpoch::from_epoch(state.slot.epoch(spec.slots_per_epoch), epoch) {
@ -75,8 +74,8 @@ impl ValidatorService for ValidatorServiceInstance {
for validator_pk in validators.get_public_keys() { for validator_pk in validators.get_public_keys() {
let mut active_validator = ActiveValidator::new(); let mut active_validator = ActiveValidator::new();
let public_key = match PublicKey::ssz_decode(validator_pk, 0) { let public_key = match decode::<PublicKey>(validator_pk) {
Ok((v, _index)) => v, Ok(v) => v,
Err(_) => { Err(_) => {
let log_clone = self.log.clone(); let log_clone = self.log.clone();
let f = sink let f = sink
@ -84,7 +83,7 @@ impl ValidatorService for ValidatorServiceInstance {
RpcStatusCode::InvalidArgument, RpcStatusCode::InvalidArgument,
Some("Invalid public_key".to_string()), Some("Invalid public_key".to_string()),
)) ))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}", req)); .map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f); return ctx.spawn(f);
} }
}; };
@ -157,6 +156,7 @@ impl ValidatorService for ValidatorServiceInstance {
duty.set_committee_index(attestation_duties.committee_index as u64); duty.set_committee_index(attestation_duties.committee_index as u64);
duty.set_attestation_slot(attestation_duties.slot.as_u64()); duty.set_attestation_slot(attestation_duties.slot.as_u64());
duty.set_attestation_shard(attestation_duties.shard); duty.set_attestation_shard(attestation_duties.shard);
duty.set_committee_len(attestation_duties.committee_len as u64);
active_validator.set_duty(duty); active_validator.set_duty(duty);
resp_validators.push(active_validator); resp_validators.push(active_validator);

View File

@ -16,6 +16,7 @@ fn main() {
.version(version::version().as_str()) .version(version::version().as_str())
.author("Sigma Prime <contact@sigmaprime.io>") .author("Sigma Prime <contact@sigmaprime.io>")
.about("Eth 2.0 Client") .about("Eth 2.0 Client")
// file system related arguments
.arg( .arg(
Arg::with_name("datadir") Arg::with_name("datadir")
.long("datadir") .long("datadir")
@ -23,8 +24,9 @@ fn main() {
.help("Data directory for keys and databases.") .help("Data directory for keys and databases.")
.takes_value(true), .takes_value(true),
) )
// network related arguments
.arg( .arg(
Arg::with_name("listen_address") Arg::with_name("listen-address")
.long("listen-address") .long("listen-address")
.value_name("Listen Address") .value_name("Listen Address")
.help("The Network address to listen for p2p connections.") .help("The Network address to listen for p2p connections.")
@ -37,6 +39,14 @@ fn main() {
.help("Network listen port for p2p connections.") .help("Network listen port for p2p connections.")
.takes_value(true), .takes_value(true),
) )
.arg(
Arg::with_name("boot-nodes")
.long("boot-nodes")
.value_name("BOOTNODES")
.help("A list of comma separated multi addresses representing bootnodes to connect to.")
.takes_value(true),
)
// rpc related arguments
.arg( .arg(
Arg::with_name("rpc") Arg::with_name("rpc")
.long("rpc") .long("rpc")

View File

@ -94,7 +94,7 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V,
} }
fn produce_attestation(&mut self, slot: Slot, shard: u64) -> Result<PollOutcome, Error> { fn produce_attestation(&mut self, slot: Slot, shard: u64) -> Result<PollOutcome, Error> {
let attestation_data = match self.beacon_node.produce_attestation(slot, shard)? { let attestation_data = match self.beacon_node.produce_attestation_data(slot, shard)? {
Some(attestation_data) => attestation_data, Some(attestation_data) => attestation_data,
None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)), None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)),
}; };

View File

@ -26,7 +26,7 @@ impl SimulatedBeaconNode {
} }
impl BeaconNode for SimulatedBeaconNode { impl BeaconNode for SimulatedBeaconNode {
fn produce_attestation(&self, slot: Slot, shard: u64) -> ProduceResult { fn produce_attestation_data(&self, slot: Slot, shard: u64) -> ProduceResult {
*self.produce_input.write().unwrap() = Some((slot, shard)); *self.produce_input.write().unwrap() = Some((slot, shard));
match *self.produce_result.read().unwrap() { match *self.produce_result.read().unwrap() {
Some(ref r) => r.clone(), Some(ref r) => r.clone(),

View File

@ -14,7 +14,7 @@ pub enum PublishOutcome {
/// Defines the methods required to produce and publish blocks on a Beacon Node. /// Defines the methods required to produce and publish blocks on a Beacon Node.
pub trait BeaconNode: Send + Sync { pub trait BeaconNode: Send + Sync {
fn produce_attestation( fn produce_attestation_data(
&self, &self,
slot: Slot, slot: Slot,
shard: u64, shard: u64,

View File

@ -0,0 +1,13 @@
[package]
name = "operation_pool"
version = "0.1.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = "2018"
[dependencies]
int_to_bytes = { path = "../utils/int_to_bytes" }
itertools = "0.8"
parking_lot = "0.7"
types = { path = "../types" }
state_processing = { path = "../state_processing" }
ssz = { path = "../utils/ssz" }

View File

@ -0,0 +1,987 @@
use int_to_bytes::int_to_bytes8;
use itertools::Itertools;
use parking_lot::RwLock;
use ssz::ssz_encode;
use state_processing::per_block_processing::errors::{
AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
};
use state_processing::per_block_processing::{
gather_attester_slashing_indices_modular, validate_attestation,
validate_attestation_time_independent_only, verify_attester_slashing, verify_deposit,
verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer,
verify_transfer_time_independent_only,
};
use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
use types::chain_spec::Domain;
use types::{
Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch,
ProposerSlashing, Transfer, Validator, VoluntaryExit,
};
#[cfg(test)]
const VERIFY_DEPOSIT_PROOFS: bool = false;
#[cfg(not(test))]
const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this
#[derive(Default)]
pub struct OperationPool {
/// Map from attestation ID (see below) to vectors of attestations.
attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
/// Map from deposit index to deposit data.
// NOTE: We assume that there is only one deposit per index
// because the Eth1 data is updated (at most) once per epoch,
// and the spec doesn't seem to accommodate re-orgs on a time-frame
// longer than an epoch.
deposits: RwLock<BTreeMap<u64, Deposit>>,
/// Map from two attestation IDs to a slashing for those IDs.
attester_slashings: RwLock<HashMap<(AttestationId, AttestationId), AttesterSlashing>>,
/// Map from proposer index to slashing.
proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>,
/// Map from exiting validator to their exit data.
voluntary_exits: RwLock<HashMap<u64, VoluntaryExit>>,
/// Set of transfers.
transfers: RwLock<HashSet<Transfer>>,
}
/// Serialized `AttestationData` augmented with a domain to encode the fork info.
#[derive(PartialEq, Eq, Clone, Hash, Debug)]
struct AttestationId(Vec<u8>);
/// Number of domain bytes that the end of an attestation ID is padded with.
const DOMAIN_BYTES_LEN: usize = 8;
impl AttestationId {
fn from_data(attestation: &AttestationData, state: &BeaconState, spec: &ChainSpec) -> Self {
let mut bytes = ssz_encode(attestation);
let epoch = attestation.slot.epoch(spec.slots_per_epoch);
bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
AttestationId(bytes)
}
fn compute_domain_bytes(epoch: Epoch, state: &BeaconState, spec: &ChainSpec) -> Vec<u8> {
int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
}
fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool {
&self.0[self.0.len() - DOMAIN_BYTES_LEN..] == domain_bytes
}
}
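// Sketch of the ID layout: `ssz_encode(data) ++ int_to_bytes8(domain)`. Because
// the 8 domain bytes always sit at the end, `domain_bytes_match` can test fork
// membership by comparing the tail alone, without re-serialising the data.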
/// Compute a fitness score for an attestation.
///
/// The score is calculated by determining the number of *new* attestations that
/// the aggregate attestation introduces, and is proportional to the size of the reward we will
/// receive for including it in a block.
// TODO: this could be optimised with a map from validator index to whether that validator has
// attested in each of the current and previous epochs. Currently quadratic in the number of validators.
fn attestation_score(attestation: &Attestation, state: &BeaconState, spec: &ChainSpec) -> usize {
// Bitfield of validators whose attestations are new/fresh.
let mut new_validators = attestation.aggregation_bitfield.clone();
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
let state_attestations = if attestation_epoch == state.current_epoch(spec) {
&state.current_epoch_attestations
} else if attestation_epoch == state.previous_epoch(spec) {
&state.previous_epoch_attestations
} else {
return 0;
};
state_attestations
.iter()
// In a single epoch, an attester should only be attesting for one shard.
// TODO: we avoid including slashable attestations in the state here,
// but maybe we should do something else with them (like construct slashings).
.filter(|current_attestation| current_attestation.data.shard == attestation.data.shard)
.for_each(|current_attestation| {
// Remove the validators who have signed the existing attestation (they are not new)
new_validators.difference_inplace(&current_attestation.aggregation_bitfield);
});
new_validators.num_set_bits()
}
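// Worked example (hypothetical bitfields): if an incoming aggregate is signed
// by validators {0, 1, 2, 3} and the state already holds an attestation for the
// same shard signed by {2, 3}, the difference leaves {0, 1} and the score is 2:
// only the genuinely new attesters count towards the inclusion reward.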
#[derive(Debug, PartialEq, Clone)]
pub enum DepositInsertStatus {
/// The deposit was not already in the pool.
Fresh,
/// The deposit already existed in the pool.
Duplicate,
/// The deposit conflicted with an existing deposit, which was replaced.
Replaced(Box<Deposit>),
}
impl OperationPool {
/// Create a new operation pool.
pub fn new() -> Self {
Self::default()
}
/// Insert an attestation into the pool, aggregating it with existing attestations if possible.
pub fn insert_attestation(
&self,
attestation: Attestation,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
// Check that attestation signatures are valid.
validate_attestation_time_independent_only(state, &attestation, spec)?;
let id = AttestationId::from_data(&attestation.data, state, spec);
// Take a write lock on the attestations map.
let mut attestations = self.attestations.write();
let existing_attestations = match attestations.entry(id) {
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![attestation]);
return Ok(());
}
hash_map::Entry::Occupied(entry) => entry.into_mut(),
};
let mut aggregated = false;
for existing_attestation in existing_attestations.iter_mut() {
if existing_attestation.signers_disjoint_from(&attestation) {
existing_attestation.aggregate(&attestation);
aggregated = true;
} else if *existing_attestation == attestation {
aggregated = true;
}
}
if !aggregated {
existing_attestations.push(attestation);
}
Ok(())
}
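// Usage sketch (assumes an `attestation` from gossip plus a current `state` and
// `spec` are in scope):
//
// let pool = OperationPool::new();
// pool.insert_attestation(attestation, &state, &spec)?;
// // A later, signer-disjoint attestation for the same data is merged into the
// // existing aggregate instead of being stored separately.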
/// Total number of attestations in the pool, including attestations for the same data.
pub fn num_attestations(&self) -> usize {
self.attestations
.read()
.values()
.map(|atts| atts.len())
.sum()
}
/// Get a list of attestations for inclusion in a block.
pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Attestation> {
// Attestations for the current fork, which may be from the current or previous epoch.
let prev_epoch = state.previous_epoch(spec);
let current_epoch = state.current_epoch(spec);
let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec);
let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec);
self.attestations
.read()
.iter()
.filter(|(key, _)| {
key.domain_bytes_match(&prev_domain_bytes)
|| key.domain_bytes_match(&curr_domain_bytes)
})
.flat_map(|(_, attestations)| attestations)
// That are not superseded by an attestation included in the state...
.filter(|attestation| !superior_attestation_exists_in_state(state, attestation))
// That are valid...
.filter(|attestation| validate_attestation(state, attestation, spec).is_ok())
// Scored by the number of new attestations they introduce (descending)
// TODO: need to consider attestations introduced in THIS block
.map(|att| (att, attestation_score(att, state, spec)))
// Don't include any useless attestations (score 0)
.filter(|&(_, score)| score != 0)
.sorted_by_key(|&(_, score)| std::cmp::Reverse(score))
// Limited to the maximum number of attestations per block
.take(spec.max_attestations as usize)
.map(|(att, _)| att)
.cloned()
.collect()
}
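// The pipeline above is a greedy selection: restrict to attestations for the
// current fork, drop any already covered by the state, validate, score by the
// number of new signers, then take the `max_attestations` highest scorers. As
// the TODO notes, scores are computed against the state only, so two selected
// aggregates may still overlap one another.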
/// Remove attestations which are too old to be included in a block.
// TODO: we could probably prune other attestations here:
// - ones that are completely covered by attestations included in the state
// - maybe ones invalidated by the confirmation of one fork over another
pub fn prune_attestations(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.attestations.write().retain(|_, attestations| {
// All the attestations in this bucket have the same data, so we only need to
// check the first one.
attestations.first().map_or(false, |att| {
finalized_state.slot < att.data.slot + spec.slots_per_epoch
})
});
}
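// An attestation for slot `s` therefore survives until the finalized slot
// reaches `s + SLOTS_PER_EPOCH`, i.e. until it falls outside the one-epoch
// inclusion window.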
/// Add a deposit to the pool.
///
/// No two distinct deposits should be added with the same index.
pub fn insert_deposit(
&self,
deposit: Deposit,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<DepositInsertStatus, DepositValidationError> {
use DepositInsertStatus::*;
match self.deposits.write().entry(deposit.index) {
Entry::Vacant(entry) => {
verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?;
entry.insert(deposit);
Ok(Fresh)
}
Entry::Occupied(mut entry) => {
if entry.get() == &deposit {
Ok(Duplicate)
} else {
verify_deposit(state, &deposit, VERIFY_DEPOSIT_PROOFS, spec)?;
Ok(Replaced(Box::new(entry.insert(deposit))))
}
}
}
}
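// Example of the three outcomes (see the `insert_deposit` test below):
// inserting at a vacant index yields `Fresh`; re-inserting the identical
// deposit yields `Duplicate`; a different (but valid) deposit at an occupied
// index yields `Replaced` carrying the deposit it displaced.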
/// Get an ordered list of deposits for inclusion in a block.
///
/// Take at most the maximum number of deposits, beginning from the current deposit index.
pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Deposit> {
let start_idx = state.deposit_index;
(start_idx..start_idx + spec.max_deposits)
.map(|idx| self.deposits.read().get(&idx).cloned())
.take_while(Option::is_some)
.flatten()
.collect()
}
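// Because `take_while(Option::is_some)` stops at the first missing index, the
// result is always contiguous from `state.deposit_index`: a gap at index `n`
// excludes every later deposit, even ones already in the pool.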
/// Remove all deposits with index less than the deposit index of the latest finalised block.
pub fn prune_deposits(&self, state: &BeaconState) -> BTreeMap<u64, Deposit> {
let deposits_keep = self.deposits.write().split_off(&state.deposit_index);
std::mem::replace(&mut self.deposits.write(), deposits_keep)
}
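// `split_off` retains everything at or above `state.deposit_index`, and
// `mem::replace` swaps that back into the pool, returning the map of pruned
// deposits (indices strictly below the finalized deposit index). The two
// `write()` calls never deadlock: the first guard is dropped at the end of its
// statement, before the second lock is taken.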
/// The number of deposits stored in the pool.
pub fn num_deposits(&self) -> usize {
self.deposits.read().len()
}
/// Insert a proposer slashing into the pool.
pub fn insert_proposer_slashing(
&self,
slashing: ProposerSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), ProposerSlashingValidationError> {
// TODO: should maybe insert anyway if the proposer is unknown in the validator index,
// because they could *become* known later
verify_proposer_slashing(&slashing, state, spec)?;
self.proposer_slashings
.write()
.insert(slashing.proposer_index, slashing);
Ok(())
}
/// Compute the tuple ID that is used to identify an attester slashing.
///
/// Depends on the fork field of the state, but not on the state's epoch.
fn attester_slashing_id(
slashing: &AttesterSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> (AttestationId, AttestationId) {
(
AttestationId::from_data(&slashing.slashable_attestation_1.data, state, spec),
AttestationId::from_data(&slashing.slashable_attestation_2.data, state, spec),
)
}
/// Insert an attester slashing into the pool.
pub fn insert_attester_slashing(
&self,
slashing: AttesterSlashing,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), AttesterSlashingValidationError> {
verify_attester_slashing(state, &slashing, true, spec)?;
let id = Self::attester_slashing_id(&slashing, state, spec);
self.attester_slashings.write().insert(id, slashing);
Ok(())
}
/// Get proposer and attester slashings for inclusion in a block.
///
/// This function computes both types of slashings together, because
/// attester slashings may be invalidated by proposer slashings included
/// earlier in the block.
pub fn get_slashings(
&self,
state: &BeaconState,
spec: &ChainSpec,
) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing>) {
let proposer_slashings = filter_limit_operations(
self.proposer_slashings.read().values(),
|slashing| {
state
.validator_registry
.get(slashing.proposer_index as usize)
.map_or(false, |validator| !validator.slashed)
},
spec.max_proposer_slashings,
);
// Set of validators to be slashed, so we don't attempt to construct invalid attester
// slashings.
let mut to_be_slashed = proposer_slashings
.iter()
.map(|s| s.proposer_index)
.collect::<HashSet<_>>();
let attester_slashings = self
.attester_slashings
.read()
.iter()
.filter(|(id, slashing)| {
// Check the fork.
Self::attester_slashing_id(slashing, state, spec) == **id
})
.filter(|(_, slashing)| {
// Take all slashings that will slash 1 or more validators.
let slashed_validators = gather_attester_slashing_indices_modular(
state,
slashing,
|index, validator| validator.slashed || to_be_slashed.contains(&index),
spec,
);
// Extend the `to_be_slashed` set so subsequent iterations don't try to include
// useless slashings.
if let Ok(validators) = slashed_validators {
to_be_slashed.extend(validators);
true
} else {
false
}
})
.take(spec.max_attester_slashings as usize)
.map(|(_, slashing)| slashing.clone())
.collect();
(proposer_slashings, attester_slashings)
}
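// Example interaction (hypothetical indices): if a proposer slashing for
// validator 7 is selected first, an attester slashing whose only slashable
// participant is validator 7 is then skipped, because `to_be_slashed` already
// contains 7 and the gather call fails, so the `if let Ok` branch is not taken.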
/// Prune proposer slashings for all slashed or withdrawn validators.
pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
prune_validator_hash_map(
&mut self.proposer_slashings.write(),
|validator| {
validator.slashed
|| validator.is_withdrawable_at(finalized_state.current_epoch(spec))
},
finalized_state,
);
}
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
/// fork.
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.attester_slashings.write().retain(|id, slashing| {
let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id;
let curr_epoch = finalized_state.current_epoch(spec);
let slashing_ok = gather_attester_slashing_indices_modular(
finalized_state,
slashing,
|_, validator| validator.slashed || validator.is_withdrawable_at(curr_epoch),
spec,
)
.is_ok();
fork_ok && slashing_ok
});
}
/// Insert a voluntary exit, validating it almost-entirely (future exits are permitted).
pub fn insert_voluntary_exit(
&self,
exit: VoluntaryExit,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), ExitValidationError> {
verify_exit_time_independent_only(state, &exit, spec)?;
self.voluntary_exits
.write()
.insert(exit.validator_index, exit);
Ok(())
}
/// Get a list of voluntary exits for inclusion in a block.
pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<VoluntaryExit> {
filter_limit_operations(
self.voluntary_exits.read().values(),
|exit| verify_exit(state, exit, spec).is_ok(),
spec.max_voluntary_exits,
)
}
/// Prune if validator has already exited at the last finalized state.
pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
prune_validator_hash_map(
&mut self.voluntary_exits.write(),
|validator| validator.is_exited_at(finalized_state.current_epoch(spec)),
finalized_state,
);
}
/// Insert a transfer into the pool, checking it for validity in the process.
pub fn insert_transfer(
&self,
transfer: Transfer,
state: &BeaconState,
spec: &ChainSpec,
) -> Result<(), TransferValidationError> {
// The signature of the transfer isn't hashed, but because we check
// it before we insert into the HashSet, we can't end up with duplicate
// transactions.
verify_transfer_time_independent_only(state, &transfer, spec)?;
self.transfers.write().insert(transfer);
Ok(())
}
/// Get a list of transfers for inclusion in a block.
// TODO: improve the economic optimality of this function by accounting for
// dependencies between transfers in the same block e.g. A pays B, B pays C
pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Transfer> {
self.transfers
.read()
.iter()
.filter(|transfer| verify_transfer(state, transfer, spec).is_ok())
.sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee))
.take(spec.max_transfers as usize)
.cloned()
.collect()
}
/// Prune the set of transfers by removing all those whose slot has already passed.
pub fn prune_transfers(&self, finalized_state: &BeaconState) {
self.transfers
.write()
.retain(|transfer| transfer.slot > finalized_state.slot)
}
/// Prune all types of transactions given the latest finalized state.
pub fn prune_all(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
self.prune_attestations(finalized_state, spec);
self.prune_deposits(finalized_state);
self.prune_proposer_slashings(finalized_state, spec);
self.prune_attester_slashings(finalized_state, spec);
self.prune_voluntary_exits(finalized_state, spec);
self.prune_transfers(finalized_state);
}
}
/// Returns `true` if the state already contains a `PendingAttestation` that is superior to the
/// given `attestation`.
///
/// A validator has nothing to gain from re-including an attestation and it adds load to the
/// network.
///
/// An existing `PendingAttestation` is superior to an existing `attestation` if:
///
/// - Their `AttestationData` is equal.
/// - `attestation` does not contain any signatures that `PendingAttestation` does not have.
fn superior_attestation_exists_in_state(state: &BeaconState, attestation: &Attestation) -> bool {
state
.current_epoch_attestations
.iter()
.chain(state.previous_epoch_attestations.iter())
.any(|existing_attestation| {
let bitfield = &attestation.aggregation_bitfield;
let existing_bitfield = &existing_attestation.aggregation_bitfield;
existing_attestation.data == attestation.data
&& bitfield.intersection(existing_bitfield).num_set_bits()
== bitfield.num_set_bits()
})
}
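// Example: an existing aggregate signed by {1, 2} is superior to an incoming
// attestation signed by {2} alone (the intersection covers all of the incoming
// signers), but not to one signed by {2, 3}.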
/// Filter up to a maximum number of operations out of an iterator.
fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec<T>
where
I: IntoIterator<Item = &'a T>,
F: Fn(&T) -> bool,
T: Clone,
{
operations
.into_iter()
.filter(|x| filter(*x))
.take(limit as usize)
.cloned()
.collect()
}
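// Shared by the exit and proposer-slashing getters: the caller supplies a
// validity predicate and a per-block limit, e.g.
// `filter_limit_operations(exits.values(), |e| verify_exit(state, e, spec).is_ok(), spec.max_voluntary_exits)`.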
/// Remove all entries from the given hash map for which `prune_if` returns true.
///
/// The keys in the map should be validator indices, which will be looked up
/// in the state's validator registry and then passed to `prune_if`.
/// Entries for unknown validators will be kept.
fn prune_validator_hash_map<T, F>(
map: &mut HashMap<u64, T>,
prune_if: F,
finalized_state: &BeaconState,
) where
F: Fn(&Validator) -> bool,
{
map.retain(|&validator_index, _| {
finalized_state
.validator_registry
.get(validator_index as usize)
.map_or(true, |validator| !prune_if(validator))
});
}
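// Note the `map_or(true, ...)`: an index with no entry in the validator
// registry keeps its operation in the pool, since such a validator may still
// become known later.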
#[cfg(test)]
mod tests {
use super::DepositInsertStatus::*;
use super::*;
use types::test_utils::*;
use types::*;
#[test]
fn insert_deposit() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (ref spec, ref state) = test_state(rng);
let op_pool = OperationPool::new();
let deposit1 = make_deposit(rng, state, spec);
let mut deposit2 = make_deposit(rng, state, spec);
deposit2.index = deposit1.index;
assert_eq!(
op_pool.insert_deposit(deposit1.clone(), state, spec),
Ok(Fresh)
);
assert_eq!(
op_pool.insert_deposit(deposit1.clone(), state, spec),
Ok(Duplicate)
);
assert_eq!(
op_pool.insert_deposit(deposit2, state, spec),
Ok(Replaced(Box::new(deposit1)))
);
}
#[test]
fn get_deposits_max() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (spec, mut state) = test_state(rng);
let op_pool = OperationPool::new();
let start = 10000;
let max_deposits = spec.max_deposits;
let extra = 5;
let offset = 1;
assert!(offset <= extra);
let deposits = dummy_deposits(rng, &state, &spec, start, max_deposits + extra);
for deposit in &deposits {
assert_eq!(
op_pool.insert_deposit(deposit.clone(), &state, &spec),
Ok(Fresh)
);
}
state.deposit_index = start + offset;
let deposits_for_block = op_pool.get_deposits(&state, &spec);
assert_eq!(deposits_for_block.len() as u64, max_deposits);
assert_eq!(
deposits_for_block[..],
deposits[offset as usize..(offset + max_deposits) as usize]
);
}
#[test]
fn prune_deposits() {
let rng = &mut XorShiftRng::from_seed([42; 16]);
let (spec, state) = test_state(rng);
let op_pool = OperationPool::new();
let start1 = 100;
// test is super slow in debug mode if this parameter is too high
let count = 5;
let gap = 25;
let start2 = start1 + count + gap;
let deposits1 = dummy_deposits(rng, &state, &spec, start1, count);
let deposits2 = dummy_deposits(rng, &state, &spec, start2, count);
for d in deposits1.into_iter().chain(deposits2) {
assert!(op_pool.insert_deposit(d, &state, &spec).is_ok());
}
assert_eq!(op_pool.num_deposits(), 2 * count as usize);
let mut state = BeaconState::random_for_test(rng);
state.deposit_index = start1;
// Pruning the first bunch of deposits in batches of 5 should work.
let step = 5;
let mut pool_size = step + 2 * count as usize;
for i in (start1..=(start1 + count)).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
pool_size -= step;
assert_eq!(op_pool.num_deposits(), pool_size);
}
assert_eq!(pool_size, count as usize);
// Pruning in the gap should do nothing.
for i in (start1 + count..start2).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
assert_eq!(op_pool.num_deposits(), count as usize);
}
// Same again for the later deposits.
pool_size += step;
for i in (start2..=(start2 + count)).step_by(step) {
state.deposit_index = i;
op_pool.prune_deposits(&state);
pool_size -= step;
assert_eq!(op_pool.num_deposits(), pool_size);
}
assert_eq!(op_pool.num_deposits(), 0);
}
// Create a random deposit (with a valid proof of possession)
fn make_deposit(rng: &mut XorShiftRng, state: &BeaconState, spec: &ChainSpec) -> Deposit {
let keypair = Keypair::random();
let mut deposit = Deposit::random_for_test(rng);
let mut deposit_input = DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(),
proof_of_possession: Signature::empty_signature(),
};
deposit_input.proof_of_possession = deposit_input.create_proof_of_possession(
&keypair.sk,
state.slot.epoch(spec.slots_per_epoch),
&state.fork,
spec,
);
deposit.deposit_data.deposit_input = deposit_input;
deposit
}
// Create `count` dummy deposits with sequential deposit IDs beginning from `start`.
fn dummy_deposits(
rng: &mut XorShiftRng,
state: &BeaconState,
spec: &ChainSpec,
start: u64,
count: u64,
) -> Vec<Deposit> {
let proto_deposit = make_deposit(rng, state, spec);
(start..start + count)
.map(|index| {
let mut deposit = proto_deposit.clone();
deposit.index = index;
deposit
})
.collect()
}
fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) {
let spec = ChainSpec::foundation();
let mut state = BeaconState::random_for_test(rng);
state.fork = Fork::genesis(&spec);
(spec, state)
}
/// Create a signed attestation for use in tests.
/// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`.
#[cfg(not(debug_assertions))]
fn signed_attestation<R: std::slice::SliceIndex<[usize], Output = [usize]>>(
committee: &CrosslinkCommittee,
keypairs: &[Keypair],
signing_range: R,
slot: Slot,
state: &BeaconState,
spec: &ChainSpec,
extra_signer: Option<usize>,
) -> Attestation {
let mut builder = TestingAttestationBuilder::new(
state,
&committee.committee,
slot,
committee.shard,
spec,
);
let signers = &committee.committee[signing_range];
let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::<Vec<_>>();
builder.sign(signers, &committee_keys, &state.fork, spec);
extra_signer.map(|c_idx| {
let validator_index = committee.committee[c_idx];
builder.sign(
&[validator_index],
&[&keypairs[validator_index].sk],
&state.fork,
spec,
)
});
builder.build()
}
/// Test state for attestation-related tests.
#[cfg(not(debug_assertions))]
fn attestation_test_state(
spec: &ChainSpec,
num_committees: usize,
) -> (BeaconState, Vec<Keypair>) {
let num_validators =
num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize;
let mut state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, spec);
let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2;
let slot = spec.genesis_slot + slot_offset;
state_builder.teleport_to_slot(slot, spec);
state_builder.build_caches(spec).unwrap();
state_builder.build()
}
/// Set the latest crosslink in the state to match the attestation.
#[cfg(not(debug_assertions))]
fn fake_latest_crosslink(att: &Attestation, state: &mut BeaconState, spec: &ChainSpec) {
state.latest_crosslinks[att.data.shard as usize] = Crosslink {
crosslink_data_root: att.data.crosslink_data_root,
epoch: att.data.slot.epoch(spec.slots_per_epoch),
};
}
#[test]
#[cfg(not(debug_assertions))]
fn test_attestation_score() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
for committee in committees {
let att1 = signed_attestation(&committee, keypairs, ..2, slot, state, spec, None);
let att2 = signed_attestation(&committee, keypairs, .., slot, state, spec, None);
assert_eq!(
att1.aggregation_bitfield.num_set_bits(),
attestation_score(&att1, state, spec)
);
state
.current_epoch_attestations
.push(PendingAttestation::from_attestation(&att1, state.slot));
assert_eq!(
committee.committee.len() - 2,
attestation_score(&att2, state, spec)
);
}
}
/// End-to-end test of basic attestation handling.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_aggregation_insert_get_prune() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
assert_eq!(
committees.len(),
1,
"we expect just one committee with this many validators"
);
for committee in &committees {
let step_size = 2;
for i in (0..committee.committee.len()).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + step_size,
slot,
state,
spec,
None,
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
}
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(op_pool.num_attestations(), committees.len());
// Before the min attestation inclusion delay, get_attestations shouldn't return anything.
assert_eq!(op_pool.get_attestations(state, spec).len(), 0);
// Then once the delay has elapsed, we should get a single aggregated attestation.
state.slot += spec.min_attestation_inclusion_delay;
let block_attestations = op_pool.get_attestations(state, spec);
assert_eq!(block_attestations.len(), committees.len());
let agg_att = &block_attestations[0];
assert_eq!(
agg_att.aggregation_bitfield.num_set_bits(),
spec.target_committee_size as usize
);
// Prune attestations shouldn't do anything at this point.
op_pool.prune_attestations(state, spec);
assert_eq!(op_pool.num_attestations(), committees.len());
// But once we advance to an epoch after the attestation, it should prune it out of
// existence.
state.slot = slot + spec.slots_per_epoch;
op_pool.prune_attestations(state, spec);
assert_eq!(op_pool.num_attestations(), 0);
}
/// Adding an attestation already in the pool should not increase the size of the pool.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_duplicate() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
for committee in &committees {
let att = signed_attestation(committee, keypairs, .., slot, state, spec, None);
fake_latest_crosslink(&att, state, spec);
op_pool
.insert_attestation(att.clone(), state, spec)
.unwrap();
op_pool.insert_attestation(att, state, spec).unwrap();
}
assert_eq!(op_pool.num_attestations(), committees.len());
}
/// Adding lots of attestations that only intersect pairwise should lead to two aggregate
/// attestations.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_pairwise_overlapping() {
let spec = &ChainSpec::foundation();
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
let step_size = 2;
for committee in &committees {
// Create attestations that overlap on `step_size` validators, like:
// {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ...
for i in (0..committee.committee.len() - step_size).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + 2 * step_size,
slot,
state,
spec,
None,
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
}
// The attestations should get aggregated into two attestations that comprise all
// validators.
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(op_pool.num_attestations(), 2 * committees.len());
}
/// Create a bunch of attestations signed by a small number of validators, and another
/// bunch signed by a larger number, such that there are at least `max_attestations`
/// signed by the larger number. Then, check that `get_attestations` only returns the
/// high-quality attestations. To ensure that no aggregation occurs, ALL attestations
/// are also signed by the 0th member of the committee.
#[test]
#[cfg(not(debug_assertions))]
fn attestation_get_max() {
let spec = &ChainSpec::foundation();
let small_step_size = 2;
let big_step_size = 4;
let (ref mut state, ref keypairs) = attestation_test_state(spec, big_step_size);
let op_pool = OperationPool::new();
let slot = state.slot - 1;
let committees = state
.get_crosslink_committees_at_slot(slot, spec)
.unwrap()
.clone();
let max_attestations = spec.max_attestations as usize;
let target_committee_size = spec.target_committee_size as usize;
let mut insert_attestations = |committee, step_size| {
for i in (0..target_committee_size).step_by(step_size) {
let att = signed_attestation(
committee,
keypairs,
i..i + step_size,
slot,
state,
spec,
if i == 0 { None } else { Some(0) },
);
fake_latest_crosslink(&att, state, spec);
op_pool.insert_attestation(att, state, spec).unwrap();
}
};
for committee in &committees {
assert_eq!(committee.committee.len(), target_committee_size);
// Attestations signed by only 2-3 validators
insert_attestations(committee, small_step_size);
// Attestations signed by 4+ validators
insert_attestations(committee, big_step_size);
}
let num_small = target_committee_size / small_step_size;
let num_big = target_committee_size / big_step_size;
assert_eq!(op_pool.attestations.read().len(), committees.len());
assert_eq!(
op_pool.num_attestations(),
(num_small + num_big) * committees.len()
);
assert!(op_pool.num_attestations() > max_attestations);
state.slot += spec.min_attestation_inclusion_delay;
let best_attestations = op_pool.get_attestations(state, spec);
assert_eq!(best_attestations.len(), max_attestations);
// All the best attestations should be signed by at least `big_step_size` (4) validators.
for att in &best_attestations {
assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size);
}
}
// TODO: more tests
}


@@ -14,6 +14,7 @@ env_logger = "0.6.0"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_yaml = "0.8" serde_yaml = "0.8"
yaml-utils = { path = "yaml_utils" }
[dependencies] [dependencies]
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }


@@ -1,708 +0,0 @@
title: Sanity tests -- small config -- 32 validators
summary: Basic sanity checks from phase 0 spec pythonization using a small state configuration and 32 validators.
All tests are run with `verify_signatures` set to False.
Tests generated via https://github.com/ethereum/research/blob/master/spec_pythonizer/sanity_check.py
test_suite: beacon_state
fork: phase0-0.5.0
test_cases:
- name: test_empty_block_transition
config:
SHARD_COUNT: 8
TARGET_COMMITTEE_SIZE: 4
MAX_BALANCE_CHURN_QUOTIENT: 32
MAX_INDICES_PER_SLASHABLE_VOTE: 4096
MAX_EXIT_DEQUEUES_PER_EPOCH: 4
SHUFFLE_ROUND_COUNT: 90
DEPOSIT_CONTRACT_TREE_DEPTH: 32
MIN_DEPOSIT_AMOUNT: 1000000000
MAX_DEPOSIT_AMOUNT: 32000000000
FORK_CHOICE_BALANCE_INCREMENT: 1000000000
EJECTION_BALANCE: 16000000000
GENESIS_FORK_VERSION: 0
GENESIS_SLOT: 4294967296
GENESIS_EPOCH: 536870912
GENESIS_START_SHARD: 0
BLS_WITHDRAWAL_PREFIX_BYTE: '0x00'
SECONDS_PER_SLOT: 6
MIN_ATTESTATION_INCLUSION_DELAY: 2
SLOTS_PER_EPOCH: 8
MIN_SEED_LOOKAHEAD: 1
ACTIVATION_EXIT_DELAY: 4
EPOCHS_PER_ETH1_VOTING_PERIOD: 16
SLOTS_PER_HISTORICAL_ROOT: 64
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
PERSISTENT_COMMITTEE_PERIOD: 2048
LATEST_RANDAO_MIXES_LENGTH: 64
LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64
LATEST_SLASHED_EXIT_LENGTH: 64
BASE_REWARD_QUOTIENT: 32
WHISTLEBLOWER_REWARD_QUOTIENT: 512
ATTESTATION_INCLUSION_REWARD_QUOTIENT: 8
INACTIVITY_PENALTY_QUOTIENT: 16777216
MIN_PENALTY_QUOTIENT: 32
MAX_PROPOSER_SLASHINGS: 16
MAX_ATTESTER_SLASHINGS: 1
MAX_ATTESTATIONS: 128
MAX_DEPOSITS: 16
MAX_VOLUNTARY_EXITS: 16
MAX_TRANSFERS: 16
DOMAIN_BEACON_BLOCK: 0
DOMAIN_RANDAO: 1
DOMAIN_ATTESTATION: 2
DOMAIN_DEPOSIT: 3
DOMAIN_VOLUNTARY_EXIT: 4
DOMAIN_TRANSFER: 5
verify_signatures: false
initial_state:
slot: 4294967296
genesis_time: 0
fork:
previous_version: '0x00000000'
current_version: '0x00000000'
epoch: 536870912
validator_registry:
- pubkey: '0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000001'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa572cbea904d67468808c8eb50a9450c9721db309128012543902d0ac358a62ae28f75bb8f1c7c42c39a8c5529bf0f4e'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000002'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x89ece308f9d1f0131765212deca99697b112d61f9be9a5f1f3780a51335b3ff981747a0b2ca2179b96d2c0c9024e5224'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000003'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xac9b60d5afcbd5663a8a44b7c5a02f19e9a77ab0a35bd65809bb5c67ec582c897feb04decc694b13e08587f3ff9b5b60'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000004'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb0e7791fb972fe014159aa33a98622da3cdc98ff707965e536d8636b5fcc5ac7a91a8c46e59a00dca575af0f18fb13dc'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000005'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa6e82f6da4520f85c5d27d8f329eccfa05944fd1096b20734c894966d12a9e2a9a9744529d7212d33883113a0cadb909'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000006'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb928f3beb93519eecf0145da903b40a4c97dca00b21f12ac0df3be9116ef2ef27b2ae6bcd4c5bc2d54ef5a70627efcb7'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000007'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa85ae765588126f5e860d019c0e26235f567a9c0c0b2d8ff30f3e8d436b1082596e5e7462d20f5be3764fd473e57f9cf'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000008'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x99cdf3807146e68e041314ca93e1fee0991224ec2a74beb2866816fd0826ce7b6263ee31e953a86d1b72cc2215a57793'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000009'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xaf81da25ecf1c84b577fefbedd61077a81dc43b00304015b2b596ab67f00e41c86bb00ebd0f90d4b125eb0539891aeed'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000a'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x80fd75ebcc0a21649e3177bcce15426da0e4f25d6828fbf4038d4d7ed3bd4421de3ef61d70f794687b12b2d571971a55'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000b'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x8345dd80ffef0eaec8920e39ebb7f5e9ae9c1d6179e9129b705923df7830c67f3690cbc48649d4079eadf5397339580c'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000c'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x851f8a0b82a6d86202a61cbc3b0f3db7d19650b914587bde4715ccd372e1e40cab95517779d840416e1679c84a6db24e'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000d'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x99bef05aaba1ea467fcbc9c420f5e3153c9d2b5f9bf2c7e2e7f6946f854043627b45b008607b9a9108bb96f3c1c089d3'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000e'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x8d9e19b3f4c7c233a6112e5397309f9812a4f61f754f11dd3dcb8b07d55a7b1dfea65f19a1488a14fef9a41495083582'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000000f'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa73eb991aa22cdb794da6fcde55a427f0a4df5a4a70de23a988b5e5fc8c4d844f66d990273267a54dd21579b7ba6a086'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000010'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb098f178f84fc753a76bb63709e9be91eec3ff5f7f3a5f4836f34fe8a1a6d6c5578d8fd820573cef3a01e2bfef3eaf3a'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000011'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x9252a4ac3529f8b2b6e8189b95a60b8865f07f9a9b73f98d5df708511d3f68632c4c7d1e2b03e6b1d1e2c01839752ada'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000012'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb271205227c7aa27f45f20b3ba380dfea8b51efae91fd32e552774c99e2a1237aa59c0c43f52aad99bba3783ea2f36a4'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000013'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa272e9d1d50a4aea7d8f0583948090d0888be5777f2846800b8281139cd4aa9eee05f89b069857a3e77ccfaae1615f9c'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000014'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x9780e853f8ce7eda772c6691d25e220ca1d2ab0db51a7824b700620f7ac94c06639e91c98bb6abd78128f0ec845df8ef'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000015'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xab48aa2cc6f4a0bb63b5d67be54ac3aed10326dda304c5aeb9e942b40d6e7610478377680ab90e092ef1895e62786008'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000016'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x8c8b694b04d98a749a0763c72fc020ef61b2bb3f63ebb182cb2e568f6a8b9ca3ae013ae78317599e7e7ba2a528ec754a'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000017'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x9717182463fbe215168e6762abcbb55c5c65290f2b5a2af616f8a6f50d625b46164178a11622d21913efdfa4b800648d'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000018'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xacb58c81ae0cae2e9d4d446b730922239923c345744eee58efaadb36e9a0925545b18a987acf0bad469035b291e37269'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000019'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x81ccc19e3b938ec2405099e90022a4218baa5082a3ca0974b24be0bc8b07e5fffaed64bef0d02c4dbfb6a307829afc5c'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001a'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xab83dfefb120fab7665a607d749ef1765fbb3cc0ba5827a20a135402c09d987c701ddb5b60f0f5495026817e8ab6ea2e'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001b'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb6ad11e5d15f77c1143b1697344911b9c590110fdd8dd09df2e58bfd757269169deefe8be3544d4e049fb3776fb0bcfb'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001c'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0x8515e7f61ca0470e165a44d247a23f17f24bf6e37185467bedb7981c1003ea70bbec875703f793dd8d11e56afa7f74ba'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001d'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xad84464b3966ec5bede84aa487facfca7823af383715078da03b387cc2f5d5597cdd7d025aa07db00a38b953bdeb6e3f'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001e'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xb29043a7273d0a2dbc2b747dcf6a5eccbd7ccb44b2d72e985537b117929bc3fd3a99001481327788ad040b4077c47c0d'
withdrawal_credentials: '0x000000000000000000000000000000000000000000000000000000000000001f'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
- pubkey: '0xa72841987e4f219d54f2b6a9eac5fe6e78704644753c3579e776a3691bc123743f8c63770ed0f72a71e9e964dbf58f43'
withdrawal_credentials: '0x0000000000000000000000000000000000000000000000000000000000000020'
activation_epoch: 536870912
exit_epoch: 18446744073709551615
withdrawable_epoch: 18446744073709551615
initiated_exit: false
slashed: false
validator_balances:
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
- 32000000000
validator_registry_update_epoch: 536870912
latest_randao_mixes:
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
previous_shuffling_start_shard: 0
current_shuffling_start_shard: 0
previous_shuffling_epoch: 536870912
current_shuffling_epoch: 536870912
previous_shuffling_seed: '0x0000000000000000000000000000000000000000000000000000000000000000'
current_shuffling_seed: '0x7a81d831e99dc63f9f10d4abce84c26473d4c2f65ec4acf9000684059473b072'
previous_epoch_attestations: []
current_epoch_attestations: []
previous_justified_epoch: 536870912
current_justified_epoch: 536870912
previous_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
current_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
justification_bitfield: 0
finalized_epoch: 536870912
finalized_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
latest_crosslinks:
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
- epoch: 536870912
crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
latest_block_roots:
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
latest_state_roots:
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
- '0x0000000000000000000000000000000000000000000000000000000000000000'
latest_active_index_roots:
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
- '0x429a7560eb31fa5d1192496997a78ffc590e70f5b39220abff4420298061501a'
latest_slashed_balances:
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
latest_block_header:
slot: 4294967296
previous_block_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
state_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
block_body_root: '0x13f2001ff0ee4a528b3c43f63d70a997aefca990ed8eada2223ee6ec3807f7cc'
signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
historical_roots: []
latest_eth1_data:
deposit_root: '0x826d25bfcb9161aabc799844c5176f7b3444dc5288856f65e0b8060560488912'
block_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'
eth1_data_votes: []
deposit_index: 32
blocks:
- slot: 4294967297
previous_block_root: '0x2befbd4b4fe8c91f3059082c8048e3376a9b7fb309e93044fac32b7cc8849773'
state_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
body:
randao_reveal: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
eth1_data:
deposit_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
block_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'
proposer_slashings: []
attester_slashings: []
attestations: []
deposits: []
voluntary_exits: []
transfers: []
signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
expected_state:
slot: 4294967297

View File

@@ -1,4 +1,3 @@
-use self::verify_proposer_slashing::verify_proposer_slashing;
 use crate::common::slash_validator;
 use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
 use rayon::prelude::*;
@@ -6,13 +5,20 @@ use ssz::{SignedRoot, TreeHash};
 use types::*;

 pub use self::verify_attester_slashing::{
-    gather_attester_slashing_indices, verify_attester_slashing,
+    gather_attester_slashing_indices, gather_attester_slashing_indices_modular,
+    verify_attester_slashing,
 };
-pub use validate_attestation::{validate_attestation, validate_attestation_without_signature};
+pub use self::verify_proposer_slashing::verify_proposer_slashing;
+pub use validate_attestation::{
+    validate_attestation, validate_attestation_time_independent_only,
+    validate_attestation_without_signature,
+};
 pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index};
-pub use verify_exit::verify_exit;
+pub use verify_exit::{verify_exit, verify_exit_time_independent_only};
 pub use verify_slashable_attestation::verify_slashable_attestation;
-pub use verify_transfer::{execute_transfer, verify_transfer};
+pub use verify_transfer::{
+    execute_transfer, verify_transfer, verify_transfer_time_independent_only,
+};

 pub mod errors;
 mod validate_attestation;
@@ -316,13 +322,7 @@ pub fn process_attestations(
     // Update the state in series.
     for attestation in attestations {
-        let pending_attestation = PendingAttestation {
-            data: attestation.data.clone(),
-            aggregation_bitfield: attestation.aggregation_bitfield.clone(),
-            custody_bitfield: attestation.custody_bitfield.clone(),
-            inclusion_slot: state.slot,
-        };
+        let pending_attestation = PendingAttestation::from_attestation(attestation, state.slot);
         let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
         if attestation_epoch == state.current_epoch(spec) {

View File

@@ -390,6 +390,11 @@ pub enum TransferInvalid {
     ///
     /// (state_slot, transfer_slot)
     StateSlotMismatch(Slot, Slot),
+    /// The `transfer.slot` is in the past relative to the state slot.
+    ///
+    /// (state_slot, transfer_slot)
+    TransferSlotInPast(Slot, Slot),
     /// The `transfer.from` validator has been activated and is not withdrawable.
     ///
     /// (from_validator)

View File

@@ -14,7 +14,16 @@ pub fn validate_attestation(
     attestation: &Attestation,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    validate_attestation_signature_optional(state, attestation, spec, true)
+    validate_attestation_parametric(state, attestation, spec, true, false)
+}
+
+/// Like `validate_attestation` but doesn't run checks which may become true in future states.
+pub fn validate_attestation_time_independent_only(
+    state: &BeaconState,
+    attestation: &Attestation,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    validate_attestation_parametric(state, attestation, spec, true, true)
 }

 /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
@@ -28,7 +37,7 @@ pub fn validate_attestation_without_signature(
     attestation: &Attestation,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    validate_attestation_signature_optional(state, attestation, spec, false)
+    validate_attestation_parametric(state, attestation, spec, false, false)
 }

 /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
@@ -36,15 +45,13 @@ pub fn validate_attestation_without_signature(
 ///
 ///
 /// Spec v0.5.0
-fn validate_attestation_signature_optional(
+fn validate_attestation_parametric(
     state: &BeaconState,
     attestation: &Attestation,
     spec: &ChainSpec,
     verify_signature: bool,
+    time_independent_only: bool,
 ) -> Result<(), Error> {
-    let state_epoch = state.slot.epoch(spec.slots_per_epoch);
-    let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
     // Can't submit pre-historic attestations.
     verify!(
         attestation.data.slot >= spec.genesis_slot,
@@ -65,7 +72,8 @@ fn validate_attestation_signature_optional(
     // Can't submit attestation too quickly.
     verify!(
-        attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
+        time_independent_only
+            || attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
         Invalid::IncludedTooEarly {
             state: state.slot,
             delay: spec.min_attestation_inclusion_delay,
@@ -74,40 +82,8 @@ fn validate_attestation_signature_optional(
     );

     // Verify the justified epoch and root is correct.
-    if attestation_epoch >= state_epoch {
-        verify!(
-            attestation.data.source_epoch == state.current_justified_epoch,
-            Invalid::WrongJustifiedEpoch {
-                state: state.current_justified_epoch,
-                attestation: attestation.data.source_epoch,
-                is_current: true,
-            }
-        );
-        verify!(
-            attestation.data.source_root == state.current_justified_root,
-            Invalid::WrongJustifiedRoot {
-                state: state.current_justified_root,
-                attestation: attestation.data.source_root,
-                is_current: true,
-            }
-        );
-    } else {
-        verify!(
-            attestation.data.source_epoch == state.previous_justified_epoch,
-            Invalid::WrongJustifiedEpoch {
-                state: state.previous_justified_epoch,
-                attestation: attestation.data.source_epoch,
-                is_current: false,
-            }
-        );
-        verify!(
-            attestation.data.source_root == state.previous_justified_root,
-            Invalid::WrongJustifiedRoot {
-                state: state.previous_justified_root,
-                attestation: attestation.data.source_root,
-                is_current: true,
-            }
-        );
-    }
+    if !time_independent_only {
+        verify_justified_epoch_and_root(attestation, state, spec)?;
+    }

     // Check that the crosslink data is valid.
@@ -188,6 +164,56 @@ fn validate_attestation_signature_optional(
     Ok(())
 }

+/// Verify that the `source_epoch` and `source_root` of an `Attestation` correctly
+/// match the current (or previous) justified epoch and root from the state.
+///
+/// Spec v0.5.0
+fn verify_justified_epoch_and_root(
+    attestation: &Attestation,
+    state: &BeaconState,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    let state_epoch = state.slot.epoch(spec.slots_per_epoch);
+    let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
+    if attestation_epoch >= state_epoch {
+        verify!(
+            attestation.data.source_epoch == state.current_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.current_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: true,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.current_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.current_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: true,
+            }
+        );
+    } else {
+        verify!(
+            attestation.data.source_epoch == state.previous_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.previous_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: false,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.previous_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.previous_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: true,
+            }
+        );
+    }
+    Ok(())
+}
+
 /// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the
 /// `aggregate_signature` is valid.
 ///
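For context, a minimal sketch of how a caller such as an operation pool might use the two entry points above: admit objects with the time-independent check on receipt, then re-validate fully at block-production time. The pool shape here is an illustrative assumption, not an API from this commit:

// Hypothetical caller: a Vec standing in for an operation pool.
fn admit_attestation(
    pool: &mut Vec<Attestation>,
    state: &BeaconState,
    attestation: Attestation,
    spec: &ChainSpec,
) -> Result<(), Error> {
    // Skip checks that may become true in a later state (inclusion delay, justification).
    validate_attestation_time_independent_only(state, &attestation, spec)?;
    pool.push(attestation);
    Ok(())
}

fn attestations_for_block(
    pool: &[Attestation],
    state: &BeaconState,
    spec: &ChainSpec,
) -> Vec<Attestation> {
    // At proposal time, run the full validation against the actual pre-state.
    pool.iter()
        .filter(|a| validate_attestation(state, a, spec).is_ok())
        .cloned()
        .collect()
}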

View File

@@ -47,6 +47,25 @@ pub fn gather_attester_slashing_indices(
     attester_slashing: &AttesterSlashing,
     spec: &ChainSpec,
 ) -> Result<Vec<u64>, Error> {
+    gather_attester_slashing_indices_modular(
+        state,
+        attester_slashing,
+        |_, validator| validator.slashed,
+        spec,
+    )
+}
+
+/// Same as `gather_attester_slashing_indices` but allows the caller to specify the criteria
+/// for determining whether a given validator should be considered slashed.
+pub fn gather_attester_slashing_indices_modular<F>(
+    state: &BeaconState,
+    attester_slashing: &AttesterSlashing,
+    is_slashed: F,
+    spec: &ChainSpec,
+) -> Result<Vec<u64>, Error>
+where
+    F: Fn(u64, &Validator) -> bool,
+{
     let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
     let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
@@ -57,7 +76,7 @@ pub fn gather_attester_slashing_indices(
             .get(*i as usize)
             .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;

-        if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed {
+        if slashable_attestation_2.validator_indices.contains(&i) & !is_slashed(*i, validator) {
             // TODO: verify that we should reject any slashable attestation which includes a
             // withdrawn validator. PH has asked the question on gitter, awaiting response.
             verify!(
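A short sketch of what the `_modular` variant enables — for example, treating validators already covered by queued slashings as slashed. The `pending` set is an illustrative assumption:

use std::collections::HashSet;

// Hypothetical: exclude validators we already plan to slash.
fn fresh_slashing_indices(
    state: &BeaconState,
    attester_slashing: &AttesterSlashing,
    pending: &HashSet<u64>,
    spec: &ChainSpec,
) -> Result<Vec<u64>, Error> {
    gather_attester_slashing_indices_modular(
        state,
        attester_slashing,
        // Slashed if the state says so, or if a queued slashing already covers the index.
        |index, validator| validator.slashed || pending.contains(&index),
        spec,
    )
}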

View File

@@ -12,6 +12,25 @@ pub fn verify_exit(
     state: &BeaconState,
     exit: &VoluntaryExit,
     spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_exit_parametric(state, exit, spec, false)
+}
+
+/// Like `verify_exit` but doesn't run checks which may become true in future states.
+pub fn verify_exit_time_independent_only(
+    state: &BeaconState,
+    exit: &VoluntaryExit,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_exit_parametric(state, exit, spec, true)
+}
+
+/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
+fn verify_exit_parametric(
+    state: &BeaconState,
+    exit: &VoluntaryExit,
+    spec: &ChainSpec,
+    time_independent_only: bool,
 ) -> Result<(), Error> {
     let validator = state
         .validator_registry
@@ -32,7 +51,7 @@ pub fn verify_exit(
     // Exits must specify an epoch when they become valid; they are not valid before then.
     verify!(
-        state.current_epoch(spec) >= exit.epoch,
+        time_independent_only || state.current_epoch(spec) >= exit.epoch,
         Invalid::FutureEpoch {
             state: state.current_epoch(spec),
             exit: exit.epoch

View File

@@ -15,6 +15,25 @@ pub fn verify_transfer(
     state: &BeaconState,
     transfer: &Transfer,
     spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_transfer_parametric(state, transfer, spec, false)
+}
+
+/// Like `verify_transfer` but doesn't run checks which may become true in future states.
+pub fn verify_transfer_time_independent_only(
+    state: &BeaconState,
+    transfer: &Transfer,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_transfer_parametric(state, transfer, spec, true)
+}
+
+/// Parametric version of `verify_transfer` that allows some checks to be skipped.
+fn verify_transfer_parametric(
+    state: &BeaconState,
+    transfer: &Transfer,
+    spec: &ChainSpec,
+    time_independent_only: bool,
 ) -> Result<(), Error> {
     let sender_balance = *state
         .validator_balances
@@ -27,17 +46,18 @@ pub fn verify_transfer(
         .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;

     verify!(
-        sender_balance >= transfer.amount,
+        time_independent_only || sender_balance >= transfer.amount,
         Invalid::FromBalanceInsufficient(transfer.amount, sender_balance)
     );

     verify!(
-        sender_balance >= transfer.fee,
+        time_independent_only || sender_balance >= transfer.fee,
         Invalid::FromBalanceInsufficient(transfer.fee, sender_balance)
     );

     verify!(
-        (sender_balance == total_amount)
+        time_independent_only
+            || (sender_balance == total_amount)
             || (sender_balance >= (total_amount + spec.min_deposit_amount)),
         Invalid::InvalidResultingFromBalance(
             sender_balance - total_amount,
@@ -45,10 +65,17 @@ pub fn verify_transfer(
         )
     );

-    verify!(
-        state.slot == transfer.slot,
-        Invalid::StateSlotMismatch(state.slot, transfer.slot)
-    );
+    if time_independent_only {
+        verify!(
+            state.slot <= transfer.slot,
+            Invalid::TransferSlotInPast(state.slot, transfer.slot)
+        );
+    } else {
+        verify!(
+            state.slot == transfer.slot,
+            Invalid::StateSlotMismatch(state.slot, transfer.slot)
+        );
+    }

     let sender_validator = state
         .validator_registry
@@ -57,7 +84,8 @@ pub fn verify_transfer(
     let epoch = state.slot.epoch(spec.slots_per_epoch);

     verify!(
-        sender_validator.is_withdrawable_at(epoch)
+        time_independent_only
+            || sender_validator.is_withdrawable_at(epoch)
             || sender_validator.activation_epoch == spec.far_future_epoch,
         Invalid::FromValidatorIneligableForTransfer(transfer.sender)
     );
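A worked check of the resulting-balance rule above, with illustrative Gwei amounts (the rule forbids leaving a non-zero balance below the minimum deposit):

// Sketch only: mirrors the verify! condition with concrete numbers.
fn resulting_balance_ok(sender_balance: u64, amount: u64, fee: u64, min_deposit: u64) -> bool {
    let total_amount = amount + fee;
    // Either the sender is emptied exactly, or at least `min_deposit` remains.
    (sender_balance == total_amount) || (sender_balance >= total_amount + min_deposit)
}

// E.g. balance 3_200_000_000, amount 1_000_000_000, fee 100_000_000, minimum 1_000_000_000:
// total = 1_100_000_000 and 3_200_000_000 >= 2_100_000_000, so the transfer is allowed.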

View File

@@ -227,7 +227,7 @@ impl ValidatorStatuses {
             status.is_previous_epoch_attester = true;

             // The inclusion slot and distance are only required for previous epoch attesters.
-            let relative_epoch = RelativeEpoch::from_slot(state.slot, a.data.slot, spec)?;
+            let relative_epoch = RelativeEpoch::from_slot(state.slot, a.inclusion_slot, spec)?;
             status.inclusion_info = Some(InclusionInfo {
                 slot: a.inclusion_slot,
                 distance: inclusion_distance(a),

View File

@@ -1,5 +1,14 @@
 use serde_derive::Deserialize;
+use serde_yaml;
+#[cfg(not(debug_assertions))]
+use state_processing::{
+    per_block_processing, per_block_processing_without_verifying_block_signature,
+    per_slot_processing,
+};
+use std::{fs::File, io::prelude::*, path::PathBuf};
 use types::*;
+#[allow(unused_imports)]
+use yaml_utils;

 #[derive(Debug, Deserialize)]
 pub struct TestCase {
@@ -19,13 +28,11 @@ pub struct TestDoc {
 }

 #[test]
-fn yaml() {
-    use serde_yaml;
-    use std::{fs::File, io::prelude::*, path::PathBuf};
+fn test_read_yaml() {
+    // Test sanity-check_small-config_32-vals.yaml
     let mut file = {
         let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
-        file_path_buf.push("specs/example.yml");
+        file_path_buf.push("yaml_utils/specs/sanity-check_small-config_32-vals.yaml");

         File::open(file_path_buf).unwrap()
     };
@@ -34,7 +41,68 @@ fn yaml() {
     file.read_to_string(&mut yaml_str).unwrap();

-    let yaml_str = yaml_str.to_lowercase();
+    yaml_str = yaml_str.to_lowercase();
+
+    let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap();
+
+    // Test sanity-check_default-config_100-vals.yaml
+    file = {
+        let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        file_path_buf.push("yaml_utils/specs/sanity-check_default-config_100-vals.yaml");
+
+        File::open(file_path_buf).unwrap()
+    };
+
+    yaml_str = String::new();
+
+    file.read_to_string(&mut yaml_str).unwrap();
+
+    yaml_str = yaml_str.to_lowercase();

     let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap();
 }
+
+#[test]
+#[cfg(not(debug_assertions))]
+fn run_state_transition_tests_small() {
+    // Test sanity-check_small-config_32-vals.yaml
+    let mut file = {
+        let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        file_path_buf.push("yaml_utils/specs/sanity-check_small-config_32-vals.yaml");
+
+        File::open(file_path_buf).unwrap()
+    };
+    let mut yaml_str = String::new();
+    file.read_to_string(&mut yaml_str).unwrap();
+    yaml_str = yaml_str.to_lowercase();
+
+    let doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap();
+
+    // Run Tests
+    for (i, test_case) in doc.test_cases.iter().enumerate() {
+        let mut state = test_case.initial_state.clone();
+        for block in test_case.blocks.iter() {
+            while block.slot > state.slot {
+                let latest_block_header = state.latest_block_header.clone();
+                per_slot_processing(&mut state, &latest_block_header, &test_case.config).unwrap();
+            }
+            if test_case.verify_signatures {
+                let res = per_block_processing(&mut state, &block, &test_case.config);
+                if res.is_err() {
+                    println!("{:?}", i);
+                    println!("{:?}", res);
+                };
+            } else {
+                let res = per_block_processing_without_verifying_block_signature(
+                    &mut state,
+                    &block,
+                    &test_case.config,
+                );
+                if res.is_err() {
+                    println!("{:?}", i);
+                    println!("{:?}", res);
+                }
+            }
+        }
+    }
+}

View File

@@ -0,0 +1,15 @@
+[package]
+name = "yaml-utils"
+version = "0.1.0"
+authors = ["Kirk Baird <baird.k@outlook.com>"]
+edition = "2018"
+
+[build-dependencies]
+reqwest = "0.9"
+tempdir = "0.3"
+
+[dependencies]
+
+[lib]
+name = "yaml_utils"
+path = "src/lib.rs"

View File

@@ -0,0 +1,28 @@
+extern crate reqwest;
+extern crate tempdir;
+
+use std::fs::File;
+use std::io::copy;
+
+fn main() {
+    // These test files are quite large (~32 MB) and so are not stored in the lighthouse repo.
+    // They are downloaded at build time by the yaml-utils crate (in this build.rs).
+    let git_path = "https://raw.githubusercontent.com/ethereum/eth2.0-tests/master/state/";
+
+    let test_names = vec![
+        "sanity-check_default-config_100-vals.yaml",
+        "sanity-check_small-config_32-vals.yaml",
+    ];
+
+    for test in test_names {
+        let mut target = String::from(git_path);
+        target.push_str(test);
+        let mut response = reqwest::get(target.as_str()).unwrap();
+
+        let mut dest = {
+            let mut file_name = String::from("specs/");
+            file_name.push_str(test);
+            File::create(file_name).unwrap()
+        };
+
+        copy(&mut response, &mut dest).unwrap();
+    }
+}

View File

@@ -0,0 +1 @@
+*.yaml

View File

@@ -0,0 +1 @@
+// This placeholder makes yaml-utils a crate, so build.rs runs when `cargo test` is called.

View File

@@ -8,6 +8,7 @@ edition = "2018"
bls = { path = "../utils/bls" }
boolean-bitfield = { path = "../utils/boolean-bitfield" }
dirs = "1.0"
+derivative = "1.0"
ethereum-types = "0.5"
hashing = { path = "../utils/hashing" }
hex = "0.3"

View File

@@ -28,6 +28,29 @@ pub struct Attestation {
     pub aggregate_signature: AggregateSignature,
 }

+impl Attestation {
+    /// Are the aggregation bitfields of these attestations disjoint?
+    pub fn signers_disjoint_from(&self, other: &Attestation) -> bool {
+        self.aggregation_bitfield
+            .intersection(&other.aggregation_bitfield)
+            .is_zero()
+    }
+
+    /// Aggregate another Attestation into this one.
+    ///
+    /// The aggregation bitfields must be disjoint, and the data must be the same.
+    pub fn aggregate(&mut self, other: &Attestation) {
+        debug_assert_eq!(self.data, other.data);
+        debug_assert!(self.signers_disjoint_from(other));
+
+        self.aggregation_bitfield
+            .union_inplace(&other.aggregation_bitfield);
+        self.custody_bitfield.union_inplace(&other.custody_bitfield);
+        self.aggregate_signature
+            .add_aggregate(&other.aggregate_signature);
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
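A quick sketch of the intended aggregation flow — two attestations for identical data with disjoint signer sets merged into one (the wrapper function is illustrative):

// Sketch: merge `b` into `a` when the preconditions of `aggregate` hold.
fn try_aggregate(a: &mut Attestation, b: &Attestation) -> bool {
    if a.data == b.data && a.signers_disjoint_from(b) {
        // Unions the bitfields and folds b's signature into a's aggregate.
        a.aggregate(b);
        true
    } else {
        false
    }
}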

View File

@@ -1,9 +1,10 @@
 use crate::*;
 use serde_derive::{Deserialize, Serialize};

-#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
+#[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)]
 pub struct AttestationDuty {
     pub slot: Slot,
     pub shard: Shard,
     pub committee_index: usize,
+    pub committee_len: usize,
 }

View File

@@ -661,6 +661,17 @@ impl BeaconState {
         })
     }

+    /// Build all the caches, if they need to be built.
+    pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
+        self.build_epoch_cache(RelativeEpoch::Previous, spec)?;
+        self.build_epoch_cache(RelativeEpoch::Current, spec)?;
+        self.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, spec)?;
+        self.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, spec)?;
+        self.update_pubkey_cache()?;
+
+        Ok(())
+    }
+
     /// Build an epoch cache, unless it has already been built.
     pub fn build_epoch_cache(
         &mut self,
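A sketch of where `build_all_caches` might be called — once up front, before repeated committee lookups. How the caller obtains the state is an assumption here:

// Hypothetical setup; the caller supplies a freshly-loaded state.
fn prepare_state(mut state: BeaconState, spec: &ChainSpec) -> Result<BeaconState, Error> {
    // Builds all four epoch caches plus the pubkey cache in one call.
    state.build_all_caches(spec)?;
    Ok(state)
}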

View File

@@ -92,6 +92,7 @@ impl EpochCache {
                     slot,
                     shard,
                     committee_index: k,
+                    committee_len: crosslink_committee.committee.len(),
                 };
                 attestation_duties[*validator_index] = Some(attestation_duty)
             }

View File

@@ -1,5 +1,5 @@
 use crate::test_utils::TestRandom;
-use crate::{AttestationData, Bitfield, Slot};
+use crate::{Attestation, AttestationData, Bitfield, Slot};
 use rand::RngCore;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode, TreeHash};
@@ -16,6 +16,18 @@ pub struct PendingAttestation {
     pub inclusion_slot: Slot,
 }

+impl PendingAttestation {
+    /// Create a `PendingAttestation` from an `Attestation`, at the given `inclusion_slot`.
+    pub fn from_attestation(attestation: &Attestation, inclusion_slot: Slot) -> Self {
+        PendingAttestation {
+            data: attestation.data.clone(),
+            aggregation_bitfield: attestation.aggregation_bitfield.clone(),
+            custody_bitfield: attestation.custody_bitfield.clone(),
+            inclusion_slot,
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
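The recorded `inclusion_slot` is what later drives inclusion-distance calculations; a minimal sketch of the relationship (the `attestation` and `state` bindings are assumed in scope):

// At block-inclusion time the state's current slot is captured...
let pending = PendingAttestation::from_attestation(&attestation, state.slot);
// ...so the inclusion distance is simply:
let distance = pending.inclusion_slot - pending.data.slot;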

View File

@@ -113,6 +113,16 @@ mod epoch_tests {
     all_tests!(Epoch);

+    #[test]
+    fn epoch_start_end() {
+        let slots_per_epoch = 8;
+
+        let epoch = Epoch::new(0);
+
+        assert_eq!(epoch.start_slot(slots_per_epoch), Slot::new(0));
+        assert_eq!(epoch.end_slot(slots_per_epoch), Slot::new(7));
+    }
+
     #[test]
     fn slot_iter() {
         let slots_per_epoch = 8;
@@ -130,4 +140,15 @@ mod epoch_tests {
             assert_eq!(Slot::from(i), slots[i as usize])
         }
     }
+
+    #[test]
+    fn max_epoch_ssz() {
+        let max_epoch = Epoch::max_value();
+
+        let mut ssz = SszStream::new();
+        ssz.append(&max_epoch);
+        let encoded = ssz.drain();
+        assert_eq!(&encoded, &[255, 255, 255, 255, 255, 255, 255, 255]);
+
+        let (decoded, _i): (Epoch, usize) = <_>::ssz_decode(&encoded, 0).unwrap();
+        assert_eq!(max_epoch, decoded);
+    }
 }

View File

@@ -19,7 +19,7 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec<Keypair> {
         .collect::<Vec<usize>>()
         .par_iter()
         .map(|&i| {
-            let secret = int_to_bytes48(i as u64 + 1);
+            let secret = int_to_bytes48(i as u64 + 1000);
             let sk = SecretKey::from_bytes(&secret).unwrap();
             let pk = PublicKey::from_secret_key(&sk);
             Keypair { sk, pk }

View File

@@ -11,7 +11,7 @@ macro_rules! ssz_tests {
                 let original = $type::random_for_test(&mut rng);

                 let bytes = ssz_encode(&original);
-                let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap();
+                let (decoded, _): ($type, usize) = <_>::ssz_decode(&bytes, 0).unwrap();

                 assert_eq!(original, decoded);
             }

View File

@@ -1,6 +1,8 @@
 use serde::de::Error;
 use serde::{Deserialize, Deserializer};

+pub const FORK_BYTES_LEN: usize = 4;
+
 pub fn u8_from_hex_str<'de, D>(deserializer: D) -> Result<u8, D::Error>
 where
     D: Deserializer<'de>,
@@ -10,14 +12,18 @@ where
     u8::from_str_radix(&s.as_str()[2..], 16).map_err(D::Error::custom)
 }

-pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; 4], D::Error>
+pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error>
 where
     D: Deserializer<'de>,
 {
     let s: String = Deserialize::deserialize(deserializer)?;
-    let mut array = [0 as u8; 4];
+    let mut array = [0 as u8; FORK_BYTES_LEN];
     let decoded: Vec<u8> = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?;

+    if decoded.len() > FORK_BYTES_LEN {
+        return Err(D::Error::custom("Fork length too long"));
+    }
+
     for (i, item) in array.iter_mut().enumerate() {
         if i > decoded.len() {
             break;

View File

@@ -6,6 +6,7 @@ use dirs;
 use log::debug;
 use rayon::prelude::*;
 use std::path::{Path, PathBuf};
+use std::time::SystemTime;

 pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs";
@@ -120,7 +121,17 @@ impl TestingBeaconStateBuilder {
             })
             .collect();

-        let genesis_time = 1553753928; // arbitrary
+        // TODO: Testing only. Burn with fire later.
+        // Set genesis to the start of the last 30-minute period. This is used for testing
+        // only: it allows multiple nodes to connect within a 30 min window and agree on a
+        // genesis.
+        let now = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+        let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
+        // Genesis is now the start of the last 30-minute period.
+        let genesis_time = now - secs_after_last_period;

         let mut state = BeaconState::genesis(
             genesis_time,
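The rounding above floors `now` to the most recent 30-minute boundary, so any node starting within the same window computes the same value; a worked example with an assumed timestamp:

let now: u64 = 1_554_000_123; // illustrative seconds since the Unix epoch
let period: u64 = 30 * 60; // 1800 seconds
let genesis_time = now - (now % period); // 1_553_999_400
assert_eq!(genesis_time % period, 0); // every node in the window agrees on this value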

View File

@@ -1,6 +1,7 @@
 use super::Slot;
 use crate::test_utils::TestRandom;
 use bls::{PublicKey, Signature};
+use derivative::Derivative;
 use rand::RngCore;
 use serde_derive::{Deserialize, Serialize};
 use ssz::TreeHash;
@@ -12,7 +13,6 @@ use test_random_derive::TestRandom;
 /// Spec v0.5.0
 #[derive(
     Debug,
-    PartialEq,
     Clone,
     Serialize,
     Deserialize,
@@ -21,7 +21,9 @@ use test_random_derive::TestRandom;
     TreeHash,
     TestRandom,
     SignedRoot,
+    Derivative,
 )]
+#[derivative(PartialEq, Eq, Hash)]
 pub struct Transfer {
     pub sender: u64,
     pub recipient: u64,
@@ -29,6 +31,7 @@ pub struct Transfer {
     pub fee: u64,
     pub slot: Slot,
     pub pubkey: PublicKey,
+    #[derivative(Hash = "ignore")]
     pub signature: Signature,
 }
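Because `Hash` ignores the `signature` field, two transfers that differ only in their signatures hash identically; a small sketch using the standard library hasher:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Sketch: the hash depends only on the signed content, not the signature bytes.
fn transfer_hash(t: &Transfer) -> u64 {
    let mut hasher = DefaultHasher::new();
    t.hash(&mut hasher);
    hasher.finish()
}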

View File

@@ -1,7 +1,7 @@
 use super::PublicKey;
 use bls_aggregates::AggregatePublicKey as RawAggregatePublicKey;

-/// A single BLS signature.
+/// A BLS aggregate public key.
 ///
 /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ
 /// serialization).
@@ -17,7 +17,7 @@ impl AggregatePublicKey {
         self.0.add(public_key.as_raw())
     }

-    /// Returns the underlying signature.
+    /// Returns the underlying public key.
     pub fn as_raw(&self) -> &RawAggregatePublicKey {
         &self.0
     }

View File

@@ -1,30 +1,45 @@
-use super::{AggregatePublicKey, Signature};
+use super::{AggregatePublicKey, Signature, BLS_AGG_SIG_BYTE_SIZE};
 use bls_aggregates::{
     AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature,
 };
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
-use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
-use ssz::{
-    decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash,
-};
+use serde_hex::{encode as hex_encode, HexVisitor};
+use ssz::{decode, hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};

 /// A BLS aggregate signature.
 ///
 /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ
 /// serialization).
 #[derive(Debug, PartialEq, Clone, Default, Eq)]
-pub struct AggregateSignature(RawAggregateSignature);
+pub struct AggregateSignature {
+    aggregate_signature: RawAggregateSignature,
+    is_empty: bool,
+}

 impl AggregateSignature {
     /// Instantiate a new AggregateSignature.
+    ///
+    /// `is_empty` is false.
+    /// The `AggregateSignature` is the point at infinity.
     pub fn new() -> Self {
-        AggregateSignature(RawAggregateSignature::new())
+        Self {
+            aggregate_signature: RawAggregateSignature::new(),
+            is_empty: false,
+        }
     }

     /// Add (aggregate) a signature to the `AggregateSignature`.
     pub fn add(&mut self, signature: &Signature) {
-        self.0.add(signature.as_raw())
+        if !self.is_empty {
+            self.aggregate_signature.add(signature.as_raw())
+        }
+    }
+
+    /// Add (aggregate) another `AggregateSignature`.
+    pub fn add_aggregate(&mut self, agg_signature: &AggregateSignature) {
+        self.aggregate_signature
+            .add_aggregate(&agg_signature.aggregate_signature)
     }

     /// Verify the `AggregateSignature` against an `AggregatePublicKey`.
@@ -37,7 +52,11 @@ impl AggregateSignature {
         domain: u64,
         aggregate_public_key: &AggregatePublicKey,
     ) -> bool {
-        self.0.verify(msg, domain, aggregate_public_key.as_raw())
+        if self.is_empty {
+            return false;
+        }
+        self.aggregate_signature
+            .verify(msg, domain, aggregate_public_key.as_raw())
     }

     /// Verify this AggregateSignature against multiple AggregatePublickeys with multiple Messages.
@@ -50,6 +69,9 @@ impl AggregateSignature {
         domain: u64,
         aggregate_public_keys: &[&AggregatePublicKey],
     ) -> bool {
+        if self.is_empty {
+            return false;
+        }
         let aggregate_public_keys: Vec<&RawAggregatePublicKey> =
             aggregate_public_keys.iter().map(|pk| pk.as_raw()).collect();
@@ -59,50 +81,93 @@ impl AggregateSignature {
             msg.extend_from_slice(message);
         }

-        self.0
+        self.aggregate_signature
             .verify_multiple(&msg[..], domain, &aggregate_public_keys[..])
     }

+    /// Return the AggregateSignature as bytes.
+    pub fn as_bytes(&self) -> Vec<u8> {
+        if self.is_empty {
+            return vec![0; BLS_AGG_SIG_BYTE_SIZE];
+        }
+        self.aggregate_signature.as_bytes()
+    }
+
+    /// Convert bytes to an AggregateSignature.
+    pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        for byte in bytes {
+            if *byte != 0 {
+                let sig =
+                    RawAggregateSignature::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?;
+                return Ok(Self {
+                    aggregate_signature: sig,
+                    is_empty: false,
+                });
+            }
+        }
+        Ok(Self::empty_signature())
+    }
+
+    /// Returns `true` if the AggregateSignature `is_empty`.
+    pub fn is_empty(&self) -> bool {
+        self.is_empty
+    }
+
+    /// Creates a new AggregateSignature.
+    ///
+    /// `aggregate_signature` is set to the point at infinity.
+    /// `is_empty` is set to true.
+    pub fn empty_signature() -> Self {
+        Self {
+            aggregate_signature: RawAggregateSignature::new(),
+            is_empty: true,
+        }
+    }
 }

 impl Encodable for AggregateSignature {
     fn ssz_append(&self, s: &mut SszStream) {
-        s.append_vec(&self.0.as_bytes());
+        s.append_encoded_raw(&self.as_bytes());
     }
 }

 impl Decodable for AggregateSignature {
     fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-        let (sig_bytes, i) = decode_ssz_list(bytes, i)?;
-        let raw_sig =
-            RawAggregateSignature::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?;
-        Ok((AggregateSignature(raw_sig), i))
+        if bytes.len() - i < BLS_AGG_SIG_BYTE_SIZE {
+            return Err(DecodeError::TooShort);
+        }
+        let agg_sig = AggregateSignature::from_bytes(&bytes[i..(i + BLS_AGG_SIG_BYTE_SIZE)])
+            .map_err(|_| DecodeError::Invalid)?;
+        Ok((agg_sig, i + BLS_AGG_SIG_BYTE_SIZE))
     }
 }

 impl Serialize for AggregateSignature {
+    /// Serde serialization is compliant with the Ethereum YAML test format.
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
     {
-        serializer.serialize_str(&hex_encode(ssz_encode(self)))
+        serializer.serialize_str(&hex_encode(self.as_bytes()))
     }
 }

 impl<'de> Deserialize<'de> for AggregateSignature {
+    /// Serde deserialization is compliant with the Ethereum YAML test format.
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: Deserializer<'de>,
     {
-        let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
-        let (obj, _) = <_>::ssz_decode(&bytes[..], 0)
+        let bytes = deserializer.deserialize_str(HexVisitor)?;
+        let agg_sig = decode(&bytes[..])
             .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
-        Ok(obj)
+        Ok(agg_sig)
     }
 }

 impl TreeHash for AggregateSignature {
     fn hash_tree_root(&self) -> Vec<u8> {
-        hash(&self.0.as_bytes())
+        hash(&self.as_bytes())
     }
 }
@@ -110,7 +175,7 @@ impl TreeHash for AggregateSignature {
 mod tests {
     use super::super::{Keypair, Signature};
     use super::*;
-    use ssz::ssz_encode;
+    use ssz::{decode, ssz_encode};

     #[test]
     pub fn test_ssz_round_trip() {
@@ -120,7 +185,7 @@ mod tests {
         original.add(&Signature::new(&[42, 42], 0, &keypair.sk));

         let bytes = ssz_encode(&original);
-        let (decoded, _) = AggregateSignature::ssz_decode(&bytes, 0).unwrap();
+        let decoded = decode::<AggregateSignature>(&bytes).unwrap();

         assert_eq!(original, decoded);
     }
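The `is_empty` flag distinguishes "no signers yet" from a genuine point-at-infinity signature; a small sketch of the round-trip behaviour the code above gives:

// An empty aggregate encodes as all zeroes...
let empty = AggregateSignature::empty_signature();
assert!(empty.is_empty());
assert_eq!(empty.as_bytes(), vec![0u8; BLS_AGG_SIG_BYTE_SIZE]);

// ...and decoding all zeroes yields an empty aggregate again, which refuses to verify.
let decoded = AggregateSignature::from_bytes(&vec![0u8; BLS_AGG_SIG_BYTE_SIZE]).unwrap();
assert!(decoded.is_empty());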

View File

@@ -1,12 +1,8 @@
-use super::{fake_signature::FakeSignature, AggregatePublicKey};
+use super::{fake_signature::FakeSignature, AggregatePublicKey, BLS_AGG_SIG_BYTE_SIZE};
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
 use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
-use ssz::{
-    decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash,
-};
-
-const SIGNATURE_LENGTH: usize = 48;
+use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};

 /// A BLS aggregate signature.
 ///
@@ -26,7 +22,7 @@ impl FakeAggregateSignature {
     /// Creates a new all-zeros signature.
     pub fn zero() -> Self {
         Self {
-            bytes: vec![0; SIGNATURE_LENGTH],
+            bytes: vec![0; BLS_AGG_SIG_BYTE_SIZE],
         }
     }
@@ -35,6 +31,11 @@ impl FakeAggregateSignature {
         // Do nothing.
     }

+    /// Does glorious nothing.
+    pub fn add_aggregate(&mut self, _agg_sig: &FakeAggregateSignature) {
+        // Do nothing.
+    }
+
     /// _Always_ returns `true`.
     pub fn verify(
         &self,
@@ -58,14 +59,21 @@ impl FakeAggregateSignature {
 impl Encodable for FakeAggregateSignature {
     fn ssz_append(&self, s: &mut SszStream) {
-        s.append_vec(&self.bytes);
+        s.append_encoded_raw(&self.bytes);
     }
 }

 impl Decodable for FakeAggregateSignature {
     fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-        let (sig_bytes, i) = decode_ssz_list(bytes, i)?;
-        Ok((FakeAggregateSignature { bytes: sig_bytes }, i))
+        if bytes.len() - i < BLS_AGG_SIG_BYTE_SIZE {
+            return Err(DecodeError::TooShort);
+        }
+        Ok((
+            FakeAggregateSignature {
+                bytes: bytes[i..(i + BLS_AGG_SIG_BYTE_SIZE)].to_vec(),
+            },
+            i + BLS_AGG_SIG_BYTE_SIZE,
+        ))
     }
 }
} }

View File

@@ -1,13 +1,9 @@
-use super::serde_vistors::HexVisitor;
-use super::{PublicKey, SecretKey};
+use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE};
 use hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
-use ssz::{
-    decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash,
-};
-
-const SIGNATURE_LENGTH: usize = 48;
+use serde_hex::HexVisitor;
+use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};

 /// A single BLS signature.
 ///
@@ -27,7 +23,7 @@ impl FakeSignature {
     /// Creates a new all-zeros signature.
     pub fn zero() -> Self {
         Self {
-            bytes: vec![0; SIGNATURE_LENGTH],
+            bytes: vec![0; BLS_SIG_BYTE_SIZE],
         }
     }
@@ -59,14 +55,21 @@ impl FakeSignature {
 impl Encodable for FakeSignature {
     fn ssz_append(&self, s: &mut SszStream) {
-        s.append_vec(&self.bytes);
+        s.append_encoded_raw(&self.bytes);
     }
 }

 impl Decodable for FakeSignature {
     fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-        let (sig_bytes, i) = decode_ssz_list(bytes, i)?;
-        Ok((FakeSignature { bytes: sig_bytes }, i))
+        if bytes.len() - i < BLS_SIG_BYTE_SIZE {
+            return Err(DecodeError::TooShort);
+        }
+        Ok((
+            FakeSignature {
+                bytes: bytes[i..(i + BLS_SIG_BYTE_SIZE)].to_vec(),
+            },
+            i + BLS_SIG_BYTE_SIZE,
+        ))
     }
 }
} }

View File

@@ -1,7 +1,9 @@
 use super::{PublicKey, SecretKey};
 use serde_derive::{Deserialize, Serialize};
+use std::fmt;
+use std::hash::{Hash, Hasher};

-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Eq, Serialize, Deserialize)]
 pub struct Keypair {
     pub sk: SecretKey,
     pub pk: PublicKey,
@@ -19,3 +21,27 @@ impl Keypair {
         self.pk.concatenated_hex_id()
     }
 }
+
+impl PartialEq for Keypair {
+    fn eq(&self, other: &Keypair) -> bool {
+        // Compare via the public keys; writing `self == other` here would recurse forever.
+        self.pk == other.pk
+    }
+}
+
+impl Hash for Keypair {
+    /// Note: this is distinct from consensus serialization, it will produce a different hash.
+    ///
+    /// This method uses the uncompressed bytes, which are much faster to obtain than the
+    /// compressed bytes required for consensus serialization.
+    ///
+    /// Use `ssz::Encode` to obtain the bytes required for consensus hashing.
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.pk.as_uncompressed_bytes().hash(state)
+    }
+}
+
+impl fmt::Display for Keypair {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.pk)
+    }
+}
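A sketch of what the new `Hash` impl enables — keying a map by `Keypair`, e.g. in test utilities. The map's purpose here is illustrative, not from this commit:

use std::collections::HashMap;

// Hypothetical: associate each keypair with its validator index.
fn index_keypairs(keypairs: Vec<Keypair>) -> HashMap<Keypair, usize> {
    keypairs
        .into_iter()
        .enumerate()
        .map(|(index, keypair)| (keypair, index))
        .collect()
}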

View File

@@ -5,7 +5,6 @@ mod aggregate_public_key;
 mod keypair;
 mod public_key;
 mod secret_key;
-mod serde_vistors;

 #[cfg(not(debug_assertions))]
 mod aggregate_signature;
@@ -31,6 +30,9 @@ pub use crate::public_key::PublicKey;
 pub use crate::secret_key::SecretKey;

 pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96;
+pub const BLS_SIG_BYTE_SIZE: usize = 96;
+pub const BLS_SECRET_KEY_BYTE_SIZE: usize = 48;
+pub const BLS_PUBLIC_KEY_BYTE_SIZE: usize = 48;

 use hashing::hash;
 use ssz::ssz_encode;

View File

@@ -1,12 +1,11 @@
-use super::SecretKey;
+use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE};
 use bls_aggregates::PublicKey as RawPublicKey;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
-use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
-use ssz::{
-    decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash,
-};
+use serde_hex::{encode as hex_encode, HexVisitor};
+use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
 use std::default;
+use std::fmt;
 use std::hash::{Hash, Hasher};

 /// A single BLS public key.
@@ -54,6 +53,12 @@ impl PublicKey {
     }
 }

+impl fmt::Display for PublicKey {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.concatenated_hex_id())
+    }
+}
+
 impl default::Default for PublicKey {
     fn default() -> Self {
         let secret_key = SecretKey::random();
@@ -63,15 +68,18 @@ impl default::Default for PublicKey {

 impl Encodable for PublicKey {
     fn ssz_append(&self, s: &mut SszStream) {
-        s.append_vec(&self.0.as_bytes());
+        s.append_encoded_raw(&self.0.as_bytes());
     }
 }

 impl Decodable for PublicKey {
     fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-        let (sig_bytes, i) = decode_ssz_list(bytes, i)?;
-        let raw_sig = RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?;
-        Ok((PublicKey(raw_sig), i))
+        if bytes.len() - i < BLS_PUBLIC_KEY_BYTE_SIZE {
+            return Err(DecodeError::TooShort);
+        }
+        let raw_sig = RawPublicKey::from_bytes(&bytes[i..(i + BLS_PUBLIC_KEY_BYTE_SIZE)])
+            .map_err(|_| DecodeError::TooShort)?;
+        Ok((PublicKey(raw_sig), i + BLS_PUBLIC_KEY_BYTE_SIZE))
     }
 }
@@ -89,10 +97,10 @@ impl<'de> Deserialize<'de> for PublicKey {
     where
         D: Deserializer<'de>,
     {
-        let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
-        let obj = PublicKey::from_bytes(&bytes[..])
+        let bytes = deserializer.deserialize_str(HexVisitor)?;
+        let pubkey = decode(&bytes[..])
             .map_err(|e| serde::de::Error::custom(format!("invalid pubkey ({:?})", e)))?;
-        Ok(obj)
+        Ok(pubkey)
     }
 }

View File

@@ -1,9 +1,10 @@
-use super::serde_vistors::HexVisitor;
+use super::BLS_SECRET_KEY_BYTE_SIZE;
 use bls_aggregates::{DecodeError as BlsDecodeError, SecretKey as RawSecretKey};
 use hex::encode as hex_encode;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
-use ssz::{decode_ssz_list, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+use serde_hex::HexVisitor;
+use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};

 /// A single BLS secret key.
 ///
@@ -32,15 +33,18 @@ impl SecretKey {
 impl Encodable for SecretKey {
     fn ssz_append(&self, s: &mut SszStream) {
-        s.append_vec(&self.0.as_bytes());
+        s.append_encoded_raw(&self.0.as_bytes());
     }
 }

 impl Decodable for SecretKey {
     fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-        let (sig_bytes, i) = decode_ssz_list(bytes, i)?;
-        let raw_sig = RawSecretKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?;
-        Ok((SecretKey(raw_sig), i))
+        if bytes.len() - i < BLS_SECRET_KEY_BYTE_SIZE {
+            return Err(DecodeError::TooShort);
+        }
+        let raw_sig = RawSecretKey::from_bytes(&bytes[i..(i + BLS_SECRET_KEY_BYTE_SIZE)])
+            .map_err(|_| DecodeError::TooShort)?;
+        Ok((SecretKey(raw_sig), i + BLS_SECRET_KEY_BYTE_SIZE))
     }
 }
@@ -59,9 +63,9 @@ impl<'de> Deserialize<'de> for SecretKey {
         D: Deserializer<'de>,
     {
         let bytes = deserializer.deserialize_str(HexVisitor)?;
-        let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0)
+        let secret_key = decode::<SecretKey>(&bytes[..])
             .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
-        Ok(pubkey)
+        Ok(secret_key)
     }
 }
} }

View File

@ -1,21 +0,0 @@
use hex;
use serde::de::{self, Visitor};
use std::fmt;
pub struct HexVisitor;
impl<'de> Visitor<'de> for HexVisitor {
type Value = Vec<u8>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a hex string (irrelevant of prefix)")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(hex::decode(value.trim_start_matches("0x"))
.map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?)
}
}

View File

@ -1,12 +1,10 @@
use super::serde_vistors::HexVisitor; use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE};
use super::{PublicKey, SecretKey};
use bls_aggregates::Signature as RawSignature; use bls_aggregates::Signature as RawSignature;
use hex::encode as hex_encode; use hex::encode as hex_encode;
use serde::de::{Deserialize, Deserializer}; use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer}; use serde::ser::{Serialize, Serializer};
use ssz::{ use serde_hex::HexVisitor;
decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
};
/// A single BLS signature. /// A single BLS signature.
/// ///
@ -63,7 +61,7 @@ impl Signature {
/// Returns a new empty signature. /// Returns a new empty signature.
pub fn empty_signature() -> Self { pub fn empty_signature() -> Self {
// Set RawSignature = infinity // Set RawSignature = infinity
let mut empty: Vec<u8> = vec![0; 96]; let mut empty: Vec<u8> = vec![0; BLS_SIG_BYTE_SIZE];
empty[0] += u8::pow(2, 6) + u8::pow(2, 7); empty[0] += u8::pow(2, 6) + u8::pow(2, 7);
Signature { Signature {
signature: RawSignature::from_bytes(&empty).unwrap(), signature: RawSignature::from_bytes(&empty).unwrap(),
@ -102,15 +100,17 @@ impl Signature {
impl Encodable for Signature { impl Encodable for Signature {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.as_bytes()); s.append_encoded_raw(&self.as_bytes());
} }
} }
impl Decodable for Signature { impl Decodable for Signature {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (sig_bytes, i) = decode_ssz_list(bytes, i)?; if bytes.len() - i < BLS_SIG_BYTE_SIZE {
let signature = Signature::from_bytes(&sig_bytes)?; return Err(DecodeError::TooShort);
Ok((signature, i)) }
let signature = Signature::from_bytes(&bytes[i..(i + BLS_SIG_BYTE_SIZE)])?;
Ok((signature, i + BLS_SIG_BYTE_SIZE))
} }
} }
@ -121,6 +121,7 @@ impl TreeHash for Signature {
} }
impl Serialize for Signature { impl Serialize for Signature {
/// Serde serialization is compliant the Ethereum YAML test format.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where where
S: Serializer, S: Serializer,
@ -130,14 +131,15 @@ impl Serialize for Signature {
} }
impl<'de> Deserialize<'de> for Signature { impl<'de> Deserialize<'de> for Signature {
/// Serde deserialization is compliant with the Ethereum YAML test format.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
let bytes = deserializer.deserialize_str(HexVisitor)?; let bytes = deserializer.deserialize_str(HexVisitor)?;
let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) let signature = decode(&bytes[..])
.map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
Ok(pubkey) Ok(signature)
} }
} }
@ -154,7 +156,7 @@ mod tests {
let original = Signature::new(&[42, 42], 0, &keypair.sk); let original = Signature::new(&[42, 42], 0, &keypair.sk);
let bytes = ssz_encode(&original); let bytes = ssz_encode(&original);
let (decoded, _) = Signature::ssz_decode(&bytes, 0).unwrap(); let decoded = decode::<Signature>(&bytes).unwrap();
assert_eq!(original, decoded); assert_eq!(original, decoded);
} }
@ -165,7 +167,7 @@ mod tests {
let sig_as_bytes: Vec<u8> = sig.as_raw().as_bytes(); let sig_as_bytes: Vec<u8> = sig.as_raw().as_bytes();
assert_eq!(sig_as_bytes.len(), 96); assert_eq!(sig_as_bytes.len(), BLS_SIG_BYTE_SIZE);
for (i, one_byte) in sig_as_bytes.iter().enumerate() { for (i, one_byte) in sig_as_bytes.iter().enumerate() {
if i == 0 { if i == 0 {
assert_eq!(*one_byte, u8::pow(2, 6) + u8::pow(2, 7)); assert_eq!(*one_byte, u8::pow(2, 6) + u8::pow(2, 7));

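`empty_signature` above encodes the BLS point at infinity by setting the two high flag bits of the first byte. A sketch, assuming the compressed BLS12-381 form of 96 bytes with bit 7 as the compression flag and bit 6 as the infinity flag (the helper name is illustrative):

```rust
const BLS_SIG_BYTE_SIZE: usize = 96;

// Build the byte form of the point at infinity: flag bits set in the first
// byte, every other byte zero.
fn infinity_signature_bytes() -> Vec<u8> {
    let mut empty = vec![0u8; BLS_SIG_BYTE_SIZE];
    empty[0] = (1 << 6) | (1 << 7); // same value as u8::pow(2, 6) + u8::pow(2, 7)
    empty
}

fn main() {
    let bytes = infinity_signature_bytes();
    assert_eq!(bytes[0], 0b1100_0000);
    assert!(bytes[1..].iter().all(|b| *b == 0));
}
```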

@ -6,7 +6,7 @@ use bit_vec::BitVec;
use serde::de::{Deserialize, Deserializer}; use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer}; use serde::ser::{Serialize, Serializer};
use serde_hex::{encode, PrefixedHexVisitor}; use serde_hex::{encode, PrefixedHexVisitor};
use ssz::Decodable; use ssz::{Decodable, Encodable};
use std::cmp; use std::cmp;
use std::default; use std::default;
@ -33,10 +33,21 @@ impl BooleanBitfield {
} }
/// Create a new bitfield with the given length `initial_len` and all values set to `bit`. /// Create a new bitfield with the given length `initial_len` and all values set to `bit`.
pub fn from_elem(inital_len: usize, bit: bool) -> Self { ///
Self { /// Note: if `initial_len` is not a multiple of 8, the remaining bits will be set to `false`
0: BitVec::from_elem(inital_len, bit), /// regardless of `bit`.
pub fn from_elem(initial_len: usize, bit: bool) -> Self {
// BitVec can panic if we don't set the len to be a multiple of 8.
let full_len = ((initial_len + 7) / 8) * 8;
let mut bitfield = BitVec::from_elem(full_len, false);
if bit {
for i in 0..initial_len {
bitfield.set(i, true);
}
} }
Self { 0: bitfield }
} }
/// Create a new bitfield using the supplied `bytes` as input /// Create a new bitfield using the supplied `bytes` as input
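The rewritten `from_elem` rounds the backing length up to a whole number of bytes before setting bits, since the underlying `BitVec` can panic on non-byte-aligned lengths. A sketch of just that rounding (helper name illustrative):

```rust
// Round a bit count up to the next multiple of 8.
fn rounded_len(initial_len: usize) -> usize {
    ((initial_len + 7) / 8) * 8
}

fn main() {
    assert_eq!(rounded_len(0), 0);
    assert_eq!(rounded_len(1), 8);
    assert_eq!(rounded_len(8), 8);
    assert_eq!(rounded_len(18), 24); // matches the 18-bit test case below
}
```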
@ -89,6 +100,11 @@ impl BooleanBitfield {
self.len() == 0 self.len() == 0
} }
/// Returns true if all bits are set to 0.
pub fn is_zero(&self) -> bool {
self.0.none()
}
/// Returns the number of bytes required to represent this bitfield. /// Returns the number of bytes required to represent this bitfield.
pub fn num_bytes(&self) -> usize { pub fn num_bytes(&self) -> usize {
self.to_bytes().len() self.to_bytes().len()
@ -104,6 +120,44 @@ impl BooleanBitfield {
pub fn to_bytes(&self) -> Vec<u8> { pub fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes() self.0.to_bytes()
} }
/// Compute the intersection (binary-and) of this bitfield with another. Lengths must match.
pub fn intersection(&self, other: &Self) -> Self {
let mut res = self.clone();
res.intersection_inplace(other);
res
}
/// Like `intersection` but in-place (updates `self`).
pub fn intersection_inplace(&mut self, other: &Self) {
self.0.intersect(&other.0);
}
/// Compute the union (binary-or) of this bitfield with another. Lengths must match.
pub fn union(&self, other: &Self) -> Self {
let mut res = self.clone();
res.union_inplace(other);
res
}
/// Like `union` but in-place (updates `self`).
pub fn union_inplace(&mut self, other: &Self) {
self.0.union(&other.0);
}
/// Compute the difference (binary-minus) of this bitfield with another. Lengths must match.
///
/// Computes `self - other`.
pub fn difference(&self, other: &Self) -> Self {
let mut res = self.clone();
res.difference_inplace(other);
res
}
/// Like `difference` but in-place (updates `self`).
pub fn difference_inplace(&mut self, other: &Self) {
self.0.difference(&other.0);
}
} }
impl default::Default for BooleanBitfield { impl default::Default for BooleanBitfield {
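Usage sketch for the new set operations; plain byte-wise functions stand in for `BooleanBitfield` so the example runs standalone, and the vectors match the unit tests further down:

```rust
// Byte-wise analogues of intersection (and), union (or) and
// difference (and-not) on the backing bits.
fn intersection(a: &[u8], b: &[u8]) -> Vec<u8> {
    a.iter().zip(b).map(|(x, y)| x & y).collect()
}

fn union(a: &[u8], b: &[u8]) -> Vec<u8> {
    a.iter().zip(b).map(|(x, y)| x | y).collect()
}

fn difference(a: &[u8], b: &[u8]) -> Vec<u8> {
    a.iter().zip(b).map(|(x, y)| x & !y).collect()
}

fn main() {
    let (a, b) = ([0b1100u8, 0b0001], [0b1011u8, 0b1001]);
    assert_eq!(intersection(&a, &b), vec![0b1000, 0b0001]);
    assert_eq!(union(&a, &b), vec![0b1111, 0b1001]);
    assert_eq!(difference(&a, &b), vec![0b0100, 0b0000]);
}
```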
@ -125,10 +179,11 @@ impl cmp::PartialEq for BooleanBitfield {
/// Create a new bitfield that is a union of two other bitfields. /// Create a new bitfield that is a union of two other bitfields.
/// ///
/// For example `union(0101, 1000) == 1101` /// For example `union(0101, 1000) == 1101`
impl std::ops::BitAnd for BooleanBitfield { // TODO: length-independent intersection for BitAnd
impl std::ops::BitOr for BooleanBitfield {
type Output = Self; type Output = Self;
fn bitand(self, other: Self) -> Self { fn bitor(self, other: Self) -> Self {
let (biggest, smallest) = if self.len() > other.len() { let (biggest, smallest) = if self.len() > other.len() {
(&self, &other) (&self, &other)
} else { } else {
@ -144,14 +199,14 @@ impl std::ops::BitAnd for BooleanBitfield {
} }
} }
impl ssz::Encodable for BooleanBitfield { impl Encodable for BooleanBitfield {
// ssz_append encodes Self according to the `ssz` spec. // ssz_append encodes Self according to the `ssz` spec.
fn ssz_append(&self, s: &mut ssz::SszStream) { fn ssz_append(&self, s: &mut ssz::SszStream) {
s.append_vec(&self.to_bytes()) s.append_vec(&self.to_bytes())
} }
} }
impl ssz::Decodable for BooleanBitfield { impl Decodable for BooleanBitfield {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), ssz::DecodeError> { fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), ssz::DecodeError> {
let len = ssz::decode::decode_length(bytes, index, ssz::LENGTH_BYTES)?; let len = ssz::decode::decode_length(bytes, index, ssz::LENGTH_BYTES)?;
if (ssz::LENGTH_BYTES + len) > bytes.len() { if (ssz::LENGTH_BYTES + len) > bytes.len() {
@ -186,7 +241,7 @@ impl Serialize for BooleanBitfield {
where where
S: Serializer, S: Serializer,
{ {
serializer.serialize_str(&encode(&ssz::ssz_encode(self))) serializer.serialize_str(&encode(&self.to_bytes()))
} }
} }
@ -197,9 +252,7 @@ impl<'de> Deserialize<'de> for BooleanBitfield {
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?;
let (bitfield, _) = <_>::ssz_decode(&bytes[..], 0) Ok(BooleanBitfield::from_bytes(&bytes))
.map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?;
Ok(bitfield)
} }
} }
@ -212,7 +265,7 @@ impl ssz::TreeHash for BooleanBitfield {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use ssz::{ssz_encode, Decodable, SszStream}; use ssz::{decode, ssz_encode, SszStream};
#[test] #[test]
fn test_new_bitfield() { fn test_new_bitfield() {
@ -380,12 +433,12 @@ mod tests {
let mut stream = SszStream::new(); let mut stream = SszStream::new();
stream.append(&field); stream.append(&field);
assert_eq!(stream.drain(), vec![0, 0, 0, 2, 225, 192]); assert_eq!(stream.drain(), vec![2, 0, 0, 0, 225, 192]);
let field = BooleanBitfield::from_elem(18, true); let field = BooleanBitfield::from_elem(18, true);
let mut stream = SszStream::new(); let mut stream = SszStream::new();
stream.append(&field); stream.append(&field);
assert_eq!(stream.drain(), vec![0, 0, 0, 3, 255, 255, 192]); assert_eq!(stream.drain(), vec![3, 0, 0, 0, 255, 255, 192]);
} }
fn create_test_bitfield() -> BooleanBitfield { fn create_test_bitfield() -> BooleanBitfield {
@ -401,13 +454,13 @@ mod tests {
#[test] #[test]
fn test_ssz_decode() { fn test_ssz_decode() {
let encoded = vec![0, 0, 0, 2, 225, 192]; let encoded = vec![2, 0, 0, 0, 225, 192];
let (field, _): (BooleanBitfield, usize) = ssz::decode_ssz(&encoded, 0).unwrap(); let field = decode::<BooleanBitfield>(&encoded).unwrap();
let expected = create_test_bitfield(); let expected = create_test_bitfield();
assert_eq!(field, expected); assert_eq!(field, expected);
let encoded = vec![0, 0, 0, 3, 255, 255, 3]; let encoded = vec![3, 0, 0, 0, 255, 255, 3];
let (field, _): (BooleanBitfield, usize) = ssz::decode_ssz(&encoded, 0).unwrap(); let field = decode::<BooleanBitfield>(&encoded).unwrap();
let expected = BooleanBitfield::from_bytes(&[255, 255, 3]); let expected = BooleanBitfield::from_bytes(&[255, 255, 3]);
assert_eq!(field, expected); assert_eq!(field, expected);
} }
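These updated vectors capture the length-prefix change: the 4-byte ssz length prefix is now little-endian, so a 2-byte payload is prefixed with `[2, 0, 0, 0]` rather than `[0, 0, 0, 2]`. A sketch (`prefix_le` is illustrative, not the crate API):

```rust
// Prepend a little-endian u32 length prefix to a payload.
fn prefix_le(payload: &[u8]) -> Vec<u8> {
    let mut out = (payload.len() as u32).to_le_bytes().to_vec();
    out.extend_from_slice(payload);
    out
}

fn main() {
    assert_eq!(prefix_le(&[225, 192]), vec![2, 0, 0, 0, 225, 192]);
    assert_eq!(prefix_le(&[255, 255, 192]), vec![3, 0, 0, 0, 255, 255, 192]);
}
```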
@ -416,15 +469,64 @@ mod tests {
fn test_ssz_round_trip() { fn test_ssz_round_trip() {
let original = BooleanBitfield::from_bytes(&vec![18; 12][..]); let original = BooleanBitfield::from_bytes(&vec![18; 12][..]);
let ssz = ssz_encode(&original); let ssz = ssz_encode(&original);
let (decoded, _) = BooleanBitfield::ssz_decode(&ssz, 0).unwrap(); let decoded = decode::<BooleanBitfield>(&ssz).unwrap();
assert_eq!(original, decoded); assert_eq!(original, decoded);
} }
#[test] #[test]
fn test_bitand() { fn test_bitor() {
let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]);
let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]);
let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]);
assert_eq!(c, a & b); assert_eq!(c, a | b);
}
#[test]
fn test_is_zero() {
let yes_data: &[&[u8]] = &[&[], &[0], &[0, 0], &[0, 0, 0]];
for bytes in yes_data {
assert!(BooleanBitfield::from_bytes(bytes).is_zero());
}
let no_data: &[&[u8]] = &[&[1], &[6], &[0, 1], &[0, 0, 1], &[0, 0, 255]];
for bytes in no_data {
assert!(!BooleanBitfield::from_bytes(bytes).is_zero());
}
}
#[test]
fn test_intersection() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let c = BooleanBitfield::from_bytes(&[0b1000, 0b0001]);
assert_eq!(a.intersection(&b), c);
assert_eq!(b.intersection(&a), c);
assert_eq!(a.intersection(&c), c);
assert_eq!(b.intersection(&c), c);
assert_eq!(a.intersection(&a), a);
assert_eq!(b.intersection(&b), b);
assert_eq!(c.intersection(&c), c);
}
#[test]
fn test_union() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let c = BooleanBitfield::from_bytes(&[0b1111, 0b1001]);
assert_eq!(a.union(&b), c);
assert_eq!(b.union(&a), c);
assert_eq!(a.union(&a), a);
assert_eq!(b.union(&b), b);
assert_eq!(c.union(&c), c);
}
#[test]
fn test_difference() {
let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]);
let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]);
let a_b = BooleanBitfield::from_bytes(&[0b0100, 0b0000]);
let b_a = BooleanBitfield::from_bytes(&[0b0011, 0b1000]);
assert_eq!(a.difference(&b), a_b);
assert_eq!(b.difference(&a), b_a);
assert!(a.difference(&a).is_zero());
} }
} }


@ -38,6 +38,24 @@ impl<'de> Visitor<'de> for PrefixedHexVisitor {
} }
} }
pub struct HexVisitor;
impl<'de> Visitor<'de> for HexVisitor {
type Value = Vec<u8>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a hex string (with or without a 0x prefix)")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(hex::decode(value.trim_start_matches("0x"))
.map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?)
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;

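For reference, a sketch of what `HexVisitor` does with its input: strip an optional `0x` prefix and decode the remaining hex digits. A hand-rolled decoder stands in for the `hex` crate so the example runs standalone:

```rust
// Decode an ASCII hex string, ignoring an optional "0x" prefix.
fn decode_hex(s: &str) -> Result<Vec<u8>, String> {
    let s = s.trim_start_matches("0x"); // prefix is optional, as in HexVisitor
    if s.len() % 2 != 0 {
        return Err("odd number of hex digits".into());
    }
    (0..s.len())
        .step_by(2)
        .map(|i| {
            u8::from_str_radix(&s[i..i + 2], 16)
                .map_err(|e| format!("invalid hex ({:?})", e))
        })
        .collect()
}

fn main() {
    assert_eq!(decode_hex("0xdeadbeef").unwrap(), vec![0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(decode_hex("deadbeef").unwrap(), decode_hex("0xdeadbeef").unwrap());
    assert!(decode_hex("0xabc").is_err());
}
```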

@ -8,3 +8,5 @@ edition = "2018"
bytes = "0.4.9" bytes = "0.4.9"
ethereum-types = "0.5" ethereum-types = "0.5"
hashing = { path = "../hashing" } hashing = { path = "../hashing" }
hex = "0.3"
yaml-rust = "0.4"


@ -69,7 +69,7 @@ Syntax:
| Shorthand | Meaning | | Shorthand | Meaning |
|:-------------|:----------------------------------------------------| |:-------------|:----------------------------------------------------|
| `big` | ``big endian`` | | `little` | ``little endian`` |
| `to_bytes` | convert to bytes. Params: ``(size, byte order)`` | | `to_bytes` | convert to bytes. Params: ``(size, byte order)`` |
| `from_bytes` | convert from bytes. Params: ``(bytes, byte order)`` | | `from_bytes` | convert from bytes. Params: ``(bytes, byte order)`` |
| `value` | the value to serialize | | `value` | the value to serialize |
@ -82,7 +82,7 @@ Syntax:
Convert directly to bytes the size of the int. (e.g. ``int16 = 2 bytes``) Convert directly to bytes the size of the int. (e.g. ``int16 = 2 bytes``)
All integers are serialized as **big endian**. All integers are serialized as **little endian**.
| Check to perform | Code | | Check to perform | Code |
|:-----------------------|:------------------------| |:-----------------------|:------------------------|
@ -92,7 +92,7 @@ All integers are serialized as **big endian**.
```python ```python
buffer_size = int_size / 8 buffer_size = int_size / 8
return value.to_bytes(buffer_size, 'big') return value.to_bytes(buffer_size, 'little')
``` ```
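A Rust counterpart to the python snippet above, as a sketch (helper name illustrative): a uint16 is written as exactly two little-endian bytes.

```rust
// Serialize a u16 as its two little-endian bytes (buffer_size = 16 / 8 = 2).
fn serialize_u16(value: u16) -> Vec<u8> {
    value.to_le_bytes().to_vec()
}

fn main() {
    assert_eq!(serialize_u16(1), vec![1, 0]);
    assert_eq!(serialize_u16(0x0102), vec![2, 1]);
}
```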
#### Address #### Address
@ -131,7 +131,7 @@ For general `byte` type:
value_bytes ]`` value_bytes ]``
```python ```python
byte_length = (len(value)).to_bytes(4, 'big') byte_length = (len(value)).to_bytes(4, 'little')
return byte_length + value return byte_length + value
``` ```
@ -175,12 +175,12 @@ At each step, the following checks should be made:
Convert directly from bytes into integer utilising the number of bytes the same Convert directly from bytes into integer utilising the number of bytes the same
size as the integer length. (e.g. ``int16 == 2 bytes``) size as the integer length. (e.g. ``int16 == 2 bytes``)
All integers are interpreted as **big endian**. All integers are interpreted as **little endian**.
```python ```python
byte_length = int_size / 8 byte_length = int_size / 8
new_index = current_index + int_size new_index = current_index + int_size
return int.from_bytes(rawbytes[current_index:current_index+int_size], 'big'), new_index return int.from_bytes(rawbytes[current_index:current_index+int_size], 'little'), new_index
``` ```
#### Address #### Address
@ -206,7 +206,7 @@ return rawbytes[current_index:current_index+32], new_index
Get the length of the bytes, return the bytes. Get the length of the bytes, return the bytes.
```python ```python
bytes_length = int.from_bytes(rawbytes[current_index:current_index+4], 'big') bytes_length = int.from_bytes(rawbytes[current_index:current_index+4], 'little')
new_index = current_index + 4 + bytes_length new_index = current_index + 4 + bytes_length
return rawbytes[current_index+4:current_index+4+bytes_length], new_index return rawbytes[current_index+4:current_index+4+bytes_length], new_index
``` ```
@ -224,7 +224,7 @@ entire length of the list.
| rawbytes has enough left for length | ``len(rawbytes) > current_index + 4`` | | rawbytes has enough left for length | ``len(rawbytes) > current_index + 4`` |
```python ```python
total_length = int.from_bytes(rawbytes[current_index:current_index+4], 'big') total_length = int.from_bytes(rawbytes[current_index:current_index+4], 'little')
new_index = current_index + 4 + total_length new_index = current_index + 4 + total_length
item_index = current_index + 4 item_index = current_index + 4
deserialized_list = [] deserialized_list = []

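The (truncated) list-decoding loop above reads a little-endian `u32` total length and then consumes items from the declared region. A sketch for fixed-size `u32` items (function name and error strings illustrative):

```rust
// Decode a little-endian length prefix, then fixed-size u32 items until the
// declared region is consumed.
fn decode_u32_list(raw: &[u8]) -> Result<Vec<u32>, &'static str> {
    if raw.len() < 4 {
        return Err("too short for length prefix");
    }
    let total = u32::from_le_bytes([raw[0], raw[1], raw[2], raw[3]]) as usize;
    let body = raw.get(4..4 + total).ok_or("declared length exceeds input")?;
    if total % 4 != 0 {
        return Err("length not a multiple of item size");
    }
    Ok(body
        .chunks_exact(4)
        .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
        .collect())
}

fn main() {
    // Two u32 items (1 and 2): total_length = 8, little-endian prefix.
    let raw = [8, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0];
    assert_eq!(decode_u32_list(&raw).unwrap(), vec![1, 2]);
}
```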

@ -9,7 +9,7 @@ publish = false
cargo-fuzz = true cargo-fuzz = true
[dependencies] [dependencies]
ethereum-types = "0.4.0" ethereum-types = "0.5"
[dependencies.ssz] [dependencies.ssz]
path = ".." path = ".."
@ -84,22 +84,22 @@ path = "fuzz_targets/fuzz_target_address_decode.rs"
name = "fuzz_target_address_encode" name = "fuzz_target_address_encode"
path = "fuzz_targets/fuzz_target_address_encode.rs" path = "fuzz_targets/fuzz_target_address_encode.rs"
[[bin]]
name = "fuzz_target_vec_decode"
path = "fuzz_targets/fuzz_target_vec_decode.rs"
[[bin]] [[bin]]
name = "fuzz_target_vec_address_decode" name = "fuzz_target_vec_address_decode"
path = "fuzz_targets/fuzz_target_vec_address_decode.rs" path = "fuzz_targets/fuzz_target_vec_address_decode.rs"
[[bin]]
name = "fuzz_target_vec_u64_decode"
path = "fuzz_targets/fuzz_target_vec_u64_decode.rs"
[[bin]] [[bin]]
name = "fuzz_target_vec_bool_decode" name = "fuzz_target_vec_bool_decode"
path = "fuzz_targets/fuzz_target_vec_bool_decode.rs" path = "fuzz_targets/fuzz_target_vec_bool_decode.rs"
[[bin]]
name = "fuzz_target_vec_decode"
path = "fuzz_targets/fuzz_target_vec_decode.rs"
[[bin]] [[bin]]
name = "fuzz_target_vec_encode" name = "fuzz_target_vec_encode"
path = "fuzz_targets/fuzz_target_vec_encode.rs" path = "fuzz_targets/fuzz_target_vec_encode.rs"
[[bin]]
name = "fuzz_target_vec_u64_decode"
path = "fuzz_targets/fuzz_target_vec_u64_decode.rs"


@ -4,18 +4,17 @@ extern crate ethereum_types;
extern crate ssz; extern crate ssz;
use ethereum_types::Address; use ethereum_types::Address;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(Address, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<Address, DecodeError> = decode(data);
if data.len() >= 20 { if data.len() == 20 {
// Should have valid result // Should have valid result
let (address, index) = result.unwrap(); let address = result.unwrap();
assert_eq!(index, 20);
assert_eq!(address, Address::from_slice(&data[..20])); assert_eq!(address, Address::from_slice(&data[..20]));
} else { } else {
// Length of less than 20 should return error // Length not 20 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });

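The change from `data.len() >= 20` to `data.len() == 20` reflects an assumed property of the new top-level `decode`: trailing bytes are rejected rather than ignored, so only an exact-length input decodes. A stand-in sketch of that contract (not the ssz crate's implementation):

```rust
// Decode a 20-byte address, rejecting both short and over-long input.
fn decode_address(data: &[u8]) -> Result<[u8; 20], &'static str> {
    if data.len() < 20 {
        return Err("TooShort");
    }
    if data.len() > 20 {
        return Err("trailing bytes"); // extra input is now an error too
    }
    let mut out = [0u8; 20];
    out.copy_from_slice(data);
    Ok(out)
}

fn main() {
    assert!(decode_address(&[0u8; 19]).is_err());
    assert!(decode_address(&[0u8; 21]).is_err());
    assert!(decode_address(&[0u8; 20]).is_ok());
}
```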

@ -2,27 +2,23 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(bool, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<bool, DecodeError> = decode(data);
if data.len() >= 1 { if data.len() == 1 {
// TODO: change to little endian bytes if data[0] == 1 {
// https://github.com/sigp/lighthouse/issues/215 let val_bool = result.unwrap();
if data[0] == u8::pow(2,7) {
let (val_bool, index) = result.unwrap();
assert!(val_bool); assert!(val_bool);
assert_eq!(index, 1);
} else if data[0] == 0 { } else if data[0] == 0 {
let (val_bool, index) = result.unwrap(); let val_bool = result.unwrap();
assert!(!val_bool); assert!(!val_bool);
assert_eq!(index, 1);
} else { } else {
assert_eq!(result, Err(DecodeError::Invalid)); assert_eq!(result, Err(DecodeError::Invalid));
} }
} else { } else {
// Length of 0 should return error // Length of 0 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });

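Sketch of the bool encoding the updated target checks, assuming `true` is now the single byte 0x01 rather than the old 0x80, `false` is 0x00, and anything else is invalid (function name illustrative):

```rust
// Decode a single-byte ssz bool: 0x00 => false, 0x01 => true, else invalid.
fn decode_bool(byte: u8) -> Result<bool, &'static str> {
    match byte {
        0 => Ok(false),
        1 => Ok(true),
        _ => Err("Invalid"),
    }
}

fn main() {
    assert_eq!(decode_bool(0), Ok(false));
    assert_eq!(decode_bool(1), Ok(true));
    assert!(decode_bool(0x80).is_err()); // old-scheme `true` is now rejected
}
```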

@ -15,8 +15,6 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&val_bool); ssz.append(&val_bool);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes assert_eq!(val_bool, ssz[0]);
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(val_bool, ssz[0] % u8::pow(2, 6));
assert_eq!(ssz.len(), 1); assert_eq!(ssz.len(), 1);
}); });


@ -4,18 +4,17 @@ extern crate ethereum_types;
extern crate ssz; extern crate ssz;
use ethereum_types::H256; use ethereum_types::H256;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(H256, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<H256, DecodeError> = decode(data);
if data.len() >= 32 { if data.len() == 32 {
// Should have valid result // Should have valid result
let (hash, index) = result.unwrap(); let hash = result.unwrap();
assert_eq!(index, 32);
assert_eq!(hash, H256::from_slice(&data[..32])); assert_eq!(hash, H256::from_slice(&data[..32]));
} else { } else {
// Length of less than 32 should return error // Length not 32 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -2,21 +2,18 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(u16, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<u16, DecodeError> = decode(data);
if data.len() >= 2 { if data.len() == 2 {
// Valid result // Valid result
let (number_u16, index) = result.unwrap(); let number_u16 = result.unwrap();
assert_eq!(index, 2); let val = u16::from_le_bytes([data[0], data[1]]);
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
let val = u16::from_be_bytes([data[0], data[1]]);
assert_eq!(number_u16, val); assert_eq!(number_u16, val);
} else { } else {
// Length of 0 or 1 should return error // Length not 2 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -15,8 +15,6 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&number_u16); ssz.append(&number_u16);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(ssz.len(), 2); assert_eq!(ssz.len(), 2);
assert_eq!(number_u16, u16::from_be_bytes([ssz[0], ssz[1]])); assert_eq!(number_u16, u16::from_le_bytes([ssz[0], ssz[1]]));
}); });


@ -2,21 +2,18 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(u32, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<u32, DecodeError> = decode(data);
if data.len() >= 4 { if data.len() == 4 {
// Valid result // Valid result
let (number_u32, index) = result.unwrap(); let number_u32 = result.unwrap();
assert_eq!(index, 4); let val = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
let val = u32::from_be_bytes([data[0], data[1], data[2], data[3]]);
assert_eq!(number_u32, val); assert_eq!(number_u32, val);
} else { } else {
// Length less than 4 should return error // Length not 4 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -15,8 +15,6 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&number_u32); ssz.append(&number_u32);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(ssz.len(), 4); assert_eq!(ssz.len(), 4);
assert_eq!(number_u32, u32::from_be_bytes([ssz[0], ssz[1], ssz[2], ssz[3]])); assert_eq!(number_u32, u32::from_le_bytes([ssz[0], ssz[1], ssz[2], ssz[3]]));
}); });


@ -2,18 +2,15 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(u64, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<u64, DecodeError> = decode(data);
if data.len() >= 8 { if data.len() == 8 {
// Valid result // Valid result
let (number_u64, index) = result.unwrap(); let number_u64 = result.unwrap();
assert_eq!(index, 8); let val = u64::from_le_bytes([
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
let val = u64::from_be_bytes([
data[0], data[0],
data[1], data[1],
data[2], data[2],
@ -25,7 +22,7 @@ fuzz_target!(|data: &[u8]| {
]); ]);
assert_eq!(number_u64, val); assert_eq!(number_u64, val);
} else { } else {
// Length less than 8 should return error // Length not 8 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -9,7 +9,7 @@ fuzz_target!(|data: &[u8]| {
let mut ssz = SszStream::new(); let mut ssz = SszStream::new();
let mut number_u64 = 0; let mut number_u64 = 0;
if data.len() >= 8 { if data.len() >= 8 {
number_u64 = u64::from_be_bytes([ number_u64 = u64::from_le_bytes([
data[0], data[0],
data[1], data[1],
data[2], data[2],
@ -24,10 +24,8 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&number_u64); ssz.append(&number_u64);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(ssz.len(), 8); assert_eq!(ssz.len(), 8);
assert_eq!(number_u64, u64::from_be_bytes([ assert_eq!(number_u64, u64::from_le_bytes([
ssz[0], ssz[0],
ssz[1], ssz[1],
ssz[2], ssz[2],


@ -2,20 +2,17 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let result: Result<(u8, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<u8, DecodeError> = decode(data);
if data.len() >= 1 { if data.len() == 1 {
// Should have valid result // Should have valid result
let (number_u8, index) = result.unwrap(); let number_u8 = result.unwrap();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(index, 1);
assert_eq!(number_u8, data[0]); assert_eq!(number_u8, data[0]);
} else { } else {
// Length of 0 should return error // Length not 1 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -15,8 +15,6 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&number_u8); ssz.append(&number_u8);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(number_u8, ssz[0]); assert_eq!(number_u8, ssz[0]);
assert_eq!(ssz.len(), 1); assert_eq!(ssz.len(), 1);
}); });


@ -2,19 +2,16 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{DecodeError, decode};
// Fuzz ssz_decode() // Fuzz decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
// Note: we assume architecture is 64 bit -> usize == 64 bits // Note: we assume architecture is 64 bit -> usize == 64 bits
let result: Result<(usize, usize), DecodeError> = Decodable::ssz_decode(data, 0); let result: Result<usize, DecodeError> = decode(data);
if data.len() >= 8 { if data.len() == 8 {
// Valid result // Valid result
let (number_usize, index) = result.unwrap(); let number_usize = result.unwrap();
assert_eq!(index, 8); let val = u64::from_le_bytes([
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
let val = u64::from_be_bytes([
data[0], data[0],
data[1], data[1],
data[2], data[2],
@ -27,6 +24,6 @@ fuzz_target!(|data: &[u8]| {
assert_eq!(number_usize, val as usize); assert_eq!(number_usize, val as usize);
} else { } else {
// Length less than 8 should return error // Length not 8 should return error
assert_eq!(result, Err(DecodeError::TooShort)); assert!(result.is_err());
} }
}); });


@ -9,7 +9,7 @@ fuzz_target!(|data: &[u8]| {
let mut ssz = SszStream::new(); let mut ssz = SszStream::new();
let mut number_usize = 0; let mut number_usize = 0;
if data.len() >= 8 { if data.len() >= 8 {
number_usize = u64::from_be_bytes([ number_usize = u64::from_le_bytes([
data[0], data[0],
data[1], data[1],
data[2], data[2],
@ -24,10 +24,8 @@ fuzz_target!(|data: &[u8]| {
ssz.append(&number_usize); ssz.append(&number_usize);
let ssz = ssz.drain(); let ssz = ssz.drain();
// TODO: change to little endian bytes
// https://github.com/sigp/lighthouse/issues/215
assert_eq!(ssz.len(), 8); assert_eq!(ssz.len(), 8);
assert_eq!(number_usize, u64::from_be_bytes([ assert_eq!(number_usize, u64::from_le_bytes([
ssz[0], ssz[0],
ssz[1], ssz[1],
ssz[2], ssz[2],


@ -4,9 +4,9 @@ extern crate ethereum_types;
extern crate ssz; extern crate ssz;
use ethereum_types::{Address}; use ethereum_types::{Address};
use ssz::{DecodeError, Decodable}; use ssz::{decode, DecodeError};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let _result: Result<(Vec<Address>, usize), DecodeError> = Decodable::ssz_decode(data, 0); let _result: Result<Vec<Address>, DecodeError> = decode(data);
}); });


@ -2,9 +2,9 @@
#[macro_use] extern crate libfuzzer_sys; #[macro_use] extern crate libfuzzer_sys;
extern crate ssz; extern crate ssz;
use ssz::{DecodeError, Decodable}; use ssz::{decode, DecodeError};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let _result: Result<(Vec<bool>, usize), DecodeError> = Decodable::ssz_decode(data, 0); let _result: Result<Vec<bool>, DecodeError> = decode(data);
}); });


@ -3,10 +3,9 @@
extern crate ethereum_types; extern crate ethereum_types;
extern crate ssz; extern crate ssz;
use ethereum_types::{Address, H256}; use ssz::{decode, DecodeError, Decodable};
use ssz::{DecodeError, Decodable};
// Fuzz ssz_decode() // Fuzz ssz_decode()
fuzz_target!(|data: &[u8]| { fuzz_target!(|data: &[u8]| {
let _result: Result<(Vec<u8>, usize), DecodeError> = Decodable::ssz_decode(data, 0); let _result: Result<Vec<u8>, DecodeError> = decode(data);
}); });
