Merge pull request #192 from sigp/v0.2.0-spec

Impl v0.2.0-spec for Codebase
Age Manning 2019-02-13 10:42:50 +11:00 committed by GitHub
commit e4f6fe047d
61 changed files with 2657 additions and 2756 deletions

View File

@ -2,8 +2,8 @@
members = [ members = [
"eth2/attester", "eth2/attester",
"eth2/block_producer", "eth2/block_producer",
"eth2/genesis",
"eth2/naive_fork_choice", "eth2/naive_fork_choice",
"eth2/state_processing",
"eth2/types", "eth2/types",
"eth2/utils/bls", "eth2/utils/bls",
"eth2/utils/boolean-bitfield", "eth2/utils/boolean-bitfield",
@ -12,7 +12,6 @@ members = [
"eth2/utils/slot_clock", "eth2/utils/slot_clock",
"eth2/utils/ssz", "eth2/utils/ssz",
"eth2/utils/vec_shuffle", "eth2/utils/vec_shuffle",
"eth2/validator_induction",
"beacon_node", "beacon_node",
"beacon_node/db", "beacon_node/db",
"beacon_node/beacon_chain", "beacon_node/beacon_chain",

View File

@ -14,7 +14,6 @@ clap = "2.32.0"
db = { path = "db" } db = { path = "db" }
dirs = "1.0.3" dirs = "1.0.3"
futures = "0.1.23" futures = "0.1.23"
genesis = { path = "../eth2/genesis" }
slog = "^2.2.3" slog = "^2.2.3"
slot_clock = { path = "../eth2/utils/slot_clock" } slot_clock = { path = "../eth2/utils/slot_clock" }
slog-term = "^2.4.0" slog-term = "^2.4.0"

View File

@ -11,7 +11,6 @@ boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
db = { path = "../db" } db = { path = "../db" }
failure = "0.1" failure = "0.1"
failure_derive = "0.1" failure_derive = "0.1"
genesis = { path = "../../eth2/genesis" }
hashing = { path = "../../eth2/utils/hashing" } hashing = { path = "../../eth2/utils/hashing" }
parking_lot = "0.7" parking_lot = "0.7"
log = "0.4" log = "0.4"
@ -21,4 +20,5 @@ serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
slot_clock = { path = "../../eth2/utils/slot_clock" } slot_clock = { path = "../../eth2/utils/slot_clock" }
ssz = { path = "../../eth2/utils/ssz" } ssz = { path = "../../eth2/utils/ssz" }
state_processing = { path = "../../eth2/state_processing" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }

View File

@ -1,3 +1,4 @@
use state_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use types::{ use types::{
beacon_state::CommitteesError, AggregateSignature, Attestation, AttestationData, BeaconState, beacon_state::CommitteesError, AggregateSignature, Attestation, AttestationData, BeaconState,
@ -16,6 +17,7 @@ const PHASE_0_CUSTODY_BIT: bool = false;
/// ///
/// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be /// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be
/// rectified in a future revision. /// rectified in a future revision.
#[derive(Default)]
pub struct AttestationAggregator { pub struct AttestationAggregator {
store: HashMap<Vec<u8>, Attestation>, store: HashMap<Vec<u8>, Attestation>,
} }
@ -172,9 +174,7 @@ impl AttestationAggregator {
self.store self.store
.values() .values()
.filter_map(|attestation| { .filter_map(|attestation| {
if state if validate_attestation_without_signature(&state, attestation, spec).is_ok()
.validate_attestation_without_signature(attestation, spec)
.is_ok()
&& !known_attestation_data.contains(&attestation.data) && !known_attestation_data.contains(&attestation.data)
{ {
Some(attestation.clone()) Some(attestation.clone())
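
The hunk above replaces the `BeaconState` method with the free function `validate_attestation_without_signature` from the new `state_processing` crate. As an illustrative sketch of the filter pattern only (simplified placeholder types and a stub validator, not the real Lighthouse API):

use std::collections::{HashMap, HashSet};

// Simplified stand-ins for the real `Attestation`/`AttestationData` types.
#[derive(Clone, Hash, PartialEq, Eq)]
struct AttestationData { slot: u64, shard: u64 }
#[derive(Clone)]
struct Attestation { data: AttestationData }

// Stub for `state_processing::validate_attestation_without_signature`.
fn validate_attestation_without_signature(a: &Attestation) -> Result<(), ()> {
    if a.data.slot > 0 { Ok(()) } else { Err(()) }
}

// Keep stored attestations that validate and are not already known to the chain,
// mirroring the `filter_map` in the aggregator above.
fn filter_valid_unknown(
    store: &HashMap<Vec<u8>, Attestation>,
    known: &HashSet<AttestationData>,
) -> Vec<Attestation> {
    store
        .values()
        .filter(|a| {
            validate_attestation_without_signature(a).is_ok() && !known.contains(&a.data)
        })
        .cloned()
        .collect()
}

fn main() {
    let mut store = HashMap::new();
    store.insert(vec![0], Attestation { data: AttestationData { slot: 1, shard: 0 } });
    assert_eq!(filter_valid_unknown(&store, &HashSet::new()).len(), 1);
}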

View File

@ -1,6 +1,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use types::Hash256; use types::Hash256;
#[derive(Default)]
pub struct AttestationTargets { pub struct AttestationTargets {
map: HashMap<u64, Hash256>, map: HashMap<u64, Hash256>,
} }
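
`#[derive(Default)]` is added here (and to `AttestationAggregator` and `BlockGraph` elsewhere in this commit) so an empty instance can be built without a hand-written constructor. A minimal standard-library-only illustration, using a byte array in place of `Hash256`:

use std::collections::HashMap;

#[derive(Default)]
struct AttestationTargets {
    // validator index -> latest attestation target root (placeholder hash type)
    map: HashMap<u64, [u8; 32]>,
}

fn main() {
    // `derive(Default)` provides this; a `new()` constructor can simply delegate to it.
    let mut targets = AttestationTargets::default();
    targets.map.insert(0, [0u8; 32]);
    assert_eq!(targets.map.len(), 1);
}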

View File

@ -1,24 +1,25 @@
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, DBError,
};
use genesis::{genesis_beacon_block, genesis_beacon_state};
use log::{debug, trace};
use parking_lot::{RwLock, RwLockReadGuard};
use slot_clock::SlotClock;
use ssz::ssz_encode;
use std::sync::Arc;
use types::{
beacon_state::{BlockProcessingError, CommitteesError, SlotProcessingError},
readers::{BeaconBlockReader, BeaconStateReader},
AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Eth1Data,
FreeAttestation, Hash256, PublicKey, Signature, Slot,
};
use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
use crate::attestation_targets::AttestationTargets; use crate::attestation_targets::AttestationTargets;
use crate::block_graph::BlockGraph; use crate::block_graph::BlockGraph;
use crate::checkpoint::CheckPoint; use crate::checkpoint::CheckPoint;
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, DBError,
};
use log::{debug, trace};
use parking_lot::{RwLock, RwLockReadGuard};
use slot_clock::SlotClock;
use ssz::ssz_encode;
use state_processing::{
BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError,
};
use std::sync::Arc;
use types::{
beacon_state::CommitteesError,
readers::{BeaconBlockReader, BeaconStateReader},
AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Crosslink, Deposit,
Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, Signature, Slot,
};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum Error { pub enum Error {
@ -66,7 +67,6 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock> {
pub attestation_aggregator: RwLock<AttestationAggregator>, pub attestation_aggregator: RwLock<AttestationAggregator>,
canonical_head: RwLock<CheckPoint>, canonical_head: RwLock<CheckPoint>,
finalized_head: RwLock<CheckPoint>, finalized_head: RwLock<CheckPoint>,
justified_head: RwLock<CheckPoint>,
pub state: RwLock<BeaconState>, pub state: RwLock<BeaconState>,
pub latest_attestation_targets: RwLock<AttestationTargets>, pub latest_attestation_targets: RwLock<AttestationTargets>,
pub spec: ChainSpec, pub spec: ChainSpec,
@ -82,40 +82,42 @@ where
state_store: Arc<BeaconStateStore<T>>, state_store: Arc<BeaconStateStore<T>>,
block_store: Arc<BeaconBlockStore<T>>, block_store: Arc<BeaconBlockStore<T>>,
slot_clock: U, slot_clock: U,
genesis_time: u64,
latest_eth1_data: Eth1Data,
initial_validator_deposits: Vec<Deposit>,
spec: ChainSpec, spec: ChainSpec,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
if spec.initial_validators.is_empty() { if initial_validator_deposits.is_empty() {
return Err(Error::InsufficientValidators); return Err(Error::InsufficientValidators);
} }
let genesis_state = genesis_beacon_state(&spec); let genesis_state = BeaconState::genesis(
genesis_time,
initial_validator_deposits,
latest_eth1_data,
&spec,
);
let state_root = genesis_state.canonical_root(); let state_root = genesis_state.canonical_root();
state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?;
let genesis_block = genesis_beacon_block(state_root, &spec); let genesis_block = BeaconBlock::genesis(state_root, &spec);
let block_root = genesis_block.canonical_root(); let block_root = genesis_block.canonical_root();
block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;
let block_graph = BlockGraph::new(); let block_graph = BlockGraph::new();
block_graph.add_leaf(&Hash256::zero(), block_root.clone()); block_graph.add_leaf(&Hash256::zero(), block_root);
let finalized_head = RwLock::new(CheckPoint::new( let finalized_head = RwLock::new(CheckPoint::new(
genesis_block.clone(), genesis_block.clone(),
block_root.clone(), block_root,
genesis_state.clone(), genesis_state.clone(),
state_root.clone(), state_root,
));
let justified_head = RwLock::new(CheckPoint::new(
genesis_block.clone(),
block_root.clone(),
genesis_state.clone(),
state_root.clone(),
)); ));
let canonical_head = RwLock::new(CheckPoint::new( let canonical_head = RwLock::new(CheckPoint::new(
genesis_block.clone(), genesis_block.clone(),
block_root.clone(), block_root,
genesis_state.clone(), genesis_state.clone(),
state_root.clone(), state_root,
)); ));
let attestation_aggregator = RwLock::new(AttestationAggregator::new()); let attestation_aggregator = RwLock::new(AttestationAggregator::new());
@ -128,11 +130,10 @@ where
block_graph, block_graph,
attestation_aggregator, attestation_aggregator,
state: RwLock::new(genesis_state.clone()), state: RwLock::new(genesis_state.clone()),
justified_head,
finalized_head, finalized_head,
canonical_head, canonical_head,
latest_attestation_targets, latest_attestation_targets,
spec: spec, spec,
}) })
} }
@ -202,7 +203,7 @@ where
for _ in state_slot.as_u64()..slot.as_u64() { for _ in state_slot.as_u64()..slot.as_u64() {
self.state self.state
.write() .write()
.per_slot_processing(head_block_root.clone(), &self.spec)?; .per_slot_processing(head_block_root, &self.spec)?;
} }
Ok(()) Ok(())
} }
@ -225,19 +226,6 @@ where
None None
} }
/// Returns the number of slots the validator has been required to propose.
///
/// Returns `None` if the `validator_index` is invalid.
///
/// Information is retrieved from the present `beacon_state.validator_registry`.
pub fn proposer_slots(&self, validator_index: usize) -> Option<u64> {
if let Some(validator) = self.state.read().validator_registry.get(validator_index) {
Some(validator.proposer_slots)
} else {
None
}
}
/// Reads the slot clock, returns `None` if the slot is unavailable. /// Reads the slot clock, returns `None` if the slot is unavailable.
/// ///
/// The slot might be unavailable due to an error with the system clock, or if the present time /// The slot might be unavailable due to an error with the system clock, or if the present time
@ -277,8 +265,8 @@ where
} }
/// Returns the justified slot for the present state. /// Returns the justified epoch for the present state.
pub fn justified_slot(&self) -> Slot { pub fn justified_epoch(&self) -> Epoch {
self.state.read().justified_slot self.state.read().justified_epoch
} }
/// Returns the attestation slot and shard for a given validator index. /// Returns the attestation slot and shard for a given validator index.
@ -302,32 +290,36 @@ where
/// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> { pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
let justified_slot = self.justified_slot(); let justified_epoch = self.justified_epoch();
let justified_block_root = self let justified_block_root = *self
.state .state
.read() .read()
.get_block_root(justified_slot, &self.spec) .get_block_root(
.ok_or_else(|| Error::BadRecentBlockRoots)? justified_epoch.start_slot(self.spec.epoch_length),
.clone(); &self.spec,
)
.ok_or_else(|| Error::BadRecentBlockRoots)?;
let epoch_boundary_root = self let epoch_boundary_root = *self
.state .state
.read() .read()
.get_block_root( .get_block_root(
self.state.read().current_epoch_start_slot(&self.spec), self.state.read().current_epoch_start_slot(&self.spec),
&self.spec, &self.spec,
) )
.ok_or_else(|| Error::BadRecentBlockRoots)? .ok_or_else(|| Error::BadRecentBlockRoots)?;
.clone();
Ok(AttestationData { Ok(AttestationData {
slot: self.state.read().slot, slot: self.state.read().slot,
shard, shard,
beacon_block_root: self.head().beacon_block_root.clone(), beacon_block_root: self.head().beacon_block_root,
epoch_boundary_root, epoch_boundary_root,
shard_block_root: Hash256::zero(), shard_block_root: Hash256::zero(),
latest_crosslink_root: Hash256::zero(), latest_crosslink: Crosslink {
justified_slot, epoch: self.state.read().slot.epoch(self.spec.epoch_length),
shard_block_root: Hash256::zero(),
},
justified_epoch,
justified_block_root, justified_block_root,
}) })
} }
@ -357,7 +349,7 @@ where
let targets = self.latest_attestation_targets.read(); let targets = self.latest_attestation_targets.read();
match targets.get(validator_index) { match targets.get(validator_index) {
Some(hash) => Some(hash.clone()), Some(hash) => Some(*hash),
None => None, None => None,
} }
} }
@ -447,15 +439,11 @@ where
let parent_state = self let parent_state = self
.state_store .state_store
.get_reader(&parent_state_root)? .get_reader(&parent_state_root)?
.ok_or(Error::DBInconsistent(format!( .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?
"Missing state {}",
parent_state_root
)))?
.into_beacon_state() .into_beacon_state()
.ok_or(Error::DBInconsistent(format!( .ok_or_else(|| {
"State SSZ invalid {}", Error::DBInconsistent(format!("State SSZ invalid {}", parent_state_root))
parent_state_root })?;
)))?;
// TODO: check the block proposer signature BEFORE doing a state transition. This will // TODO: check the block proposer signature BEFORE doing a state transition. This will
// significantly lower exposure surface to DoS attacks. // significantly lower exposure surface to DoS attacks.
@ -463,7 +451,7 @@ where
// Transition the parent state to the present slot. // Transition the parent state to the present slot.
let mut state = parent_state; let mut state = parent_state;
for _ in state.slot.as_u64()..present_slot.as_u64() { for _ in state.slot.as_u64()..present_slot.as_u64() {
if let Err(e) = state.per_slot_processing(parent_block_root.clone(), &self.spec) { if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e), InvalidBlock::SlotProcessingError(e),
)); ));
@ -491,20 +479,14 @@ where
self.state_store.put(&state_root, &ssz_encode(&state)[..])?; self.state_store.put(&state_root, &ssz_encode(&state)[..])?;
// Update the block DAG. // Update the block DAG.
self.block_graph self.block_graph.add_leaf(&parent_block_root, block_root);
.add_leaf(&parent_block_root, block_root.clone());
// If the parent block was the parent_block, automatically update the canonical head. // If the parent block was the parent_block, automatically update the canonical head.
// //
// TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be
// run instead. // run instead.
if self.head().beacon_block_root == parent_block_root { if self.head().beacon_block_root == parent_block_root {
self.update_canonical_head( self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
block.clone(),
block_root.clone(),
state.clone(),
state_root.clone(),
);
// Update the local state variable. // Update the local state variable.
*self.state.write() = state.clone(); *self.state.write() = state.clone();
} }
@ -533,15 +515,13 @@ where
attestations.len() attestations.len()
); );
let parent_root = state let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?;
.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?
.clone();
let mut block = BeaconBlock { let mut block = BeaconBlock {
slot: state.slot, slot: state.slot,
parent_root, parent_root,
state_root: Hash256::zero(), // Updated after the state is calculated. state_root: Hash256::zero(), // Updated after the state is calculated.
randao_reveal: randao_reveal, randao_reveal,
eth1_data: Eth1Data { eth1_data: Eth1Data {
// TODO: replace with real data // TODO: replace with real data
deposit_root: Hash256::zero(), deposit_root: Hash256::zero(),
@ -550,11 +530,8 @@ where
signature: self.spec.empty_signature.clone(), // To be completed by a validator. signature: self.spec.empty_signature.clone(), // To be completed by a validator.
body: BeaconBlockBody { body: BeaconBlockBody {
proposer_slashings: vec![], proposer_slashings: vec![],
casper_slashings: vec![], attester_slashings: vec![],
attestations: attestations, attestations,
custody_reseeds: vec![],
custody_challenges: vec![],
custody_responses: vec![],
deposits: vec![], deposits: vec![],
exits: vec![], exits: vec![],
}, },
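
Several hunks above move from slot-based to epoch-based bookkeeping (`justified_slot` becomes `justified_epoch`, and the justified block root is looked up at `justified_epoch.start_slot(epoch_length)`). The arithmetic behind those helpers is simple integer division and multiplication; a self-contained sketch with hypothetical newtypes standing in for `types::{Slot, Epoch}`:

// Hypothetical stand-ins for the `Slot`/`Epoch` newtypes used above.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Slot(u64);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Epoch(u64);

impl Slot {
    // A slot belongs to the epoch given by integer division.
    fn epoch(self, epoch_length: u64) -> Epoch {
        Epoch(self.0 / epoch_length)
    }
}

impl Epoch {
    // The first slot of an epoch is the epoch number times the epoch length.
    fn start_slot(self, epoch_length: u64) -> Slot {
        Slot(self.0 * epoch_length)
    }
}

fn main() {
    let epoch_length = 64; // an assumed value for spec.epoch_length
    assert_eq!(Slot(130).epoch(epoch_length), Epoch(2));
    assert_eq!(Epoch(2).start_slot(epoch_length), Slot(128));
}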

View File

@ -11,6 +11,7 @@ use types::Hash256;
/// Presently, the DAG root (genesis block) is not tracked. /// Presently, the DAG root (genesis block) is not tracked.
/// ///
/// The BlockGraph is thread-safe due to internal RwLocks. /// The BlockGraph is thread-safe due to internal RwLocks.
#[derive(Default)]
pub struct BlockGraph { pub struct BlockGraph {
pub leaves: RwLock<HashSet<Hash256>>, pub leaves: RwLock<HashSet<Hash256>>,
} }
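
For context on the `add_leaf` calls above: the leaf set tracks chain tips, so when a child extends a known leaf the parent stops being a leaf and the child takes its place. A rough, standard-library-only sketch (the real struct uses `parking_lot::RwLock`, and the return value here is illustrative):

use std::collections::HashSet;
use std::sync::RwLock;

type Hash256 = [u8; 32]; // placeholder for the real hash type

#[derive(Default)]
struct BlockGraph {
    leaves: RwLock<HashSet<Hash256>>,
}

impl BlockGraph {
    // Insert `child` as a leaf; if `parent` was a leaf it is removed.
    // Returns true when the child extended an existing leaf.
    fn add_leaf(&self, parent: &Hash256, child: Hash256) -> bool {
        let mut leaves = self.leaves.write().expect("lock poisoned");
        let extended = leaves.remove(parent);
        leaves.insert(child);
        extended
    }
}

fn main() {
    let graph = BlockGraph::default();
    graph.add_leaf(&[0u8; 32], [1u8; 32]); // genesis is added under the zero hash
    assert!(graph.add_leaf(&[1u8; 32], [2u8; 32])); // extends the previous leaf
}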

View File

@ -57,19 +57,21 @@ where
let start = self let start = self
.block_store .block_store
.get_reader(&start_hash)? .get_reader(&start_hash)?
.ok_or(Error::MissingBeaconBlock(*start_hash))?; .ok_or_else(|| Error::MissingBeaconBlock(*start_hash))?;
let start_state_root = start.state_root(); let start_state_root = start.state_root();
let state = self let state = self
.state_store .state_store
.get_reader(&start_state_root)? .get_reader(&start_state_root)?
.ok_or(Error::MissingBeaconState(start_state_root))? .ok_or_else(|| Error::MissingBeaconState(start_state_root))?
.into_beacon_state() .into_beacon_state()
.ok_or(Error::InvalidBeaconState(start_state_root))?; .ok_or_else(|| Error::InvalidBeaconState(start_state_root))?;
let active_validator_indices = let active_validator_indices = get_active_validator_indices(
get_active_validator_indices(&state.validator_registry, start.slot()); &state.validator_registry,
start.slot().epoch(self.spec.epoch_length),
);
let mut attestation_targets = Vec::with_capacity(active_validator_indices.len()); let mut attestation_targets = Vec::with_capacity(active_validator_indices.len());
for i in active_validator_indices { for i in active_validator_indices {
@ -88,7 +90,7 @@ where
&self.block_graph.leaves(), &self.block_graph.leaves(),
)?; )?;
if child_hashes_and_slots.len() == 0 { if child_hashes_and_slots.is_empty() {
break; break;
} }
@ -124,7 +126,7 @@ fn get_vote_count<T: ClientDB>(
for target in attestation_targets { for target in attestation_targets {
let (root_at_slot, _) = block_store let (root_at_slot, _) = block_store
.block_at_slot(&block_root, slot)? .block_at_slot(&block_root, slot)?
.ok_or(Error::MissingBeaconBlock(*block_root))?; .ok_or_else(|| Error::MissingBeaconBlock(*block_root))?;
if root_at_slot == *target { if root_at_slot == *target {
count += 1; count += 1;
} }
@ -163,7 +165,7 @@ fn get_child_hashes_and_slots<T: ClientDB>(
break; break;
} }
current_hash = parent_root.clone(); current_hash = parent_root;
} else { } else {
return Err(Error::MissingBeaconBlock(current_hash)); return Err(Error::MissingBeaconBlock(current_hash));
} }
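
The hunk above changes `get_active_validator_indices` to take an epoch rather than a slot. As a rough illustration of what such a helper computes (a simplified validator record, not the real registry type):

// Simplified validator record: active over [activation_epoch, exit_epoch).
struct Validator {
    activation_epoch: u64,
    exit_epoch: u64,
}

// Indices of validators that are active in the given epoch.
fn get_active_validator_indices(registry: &[Validator], epoch: u64) -> Vec<usize> {
    registry
        .iter()
        .enumerate()
        .filter(|(_, v)| v.activation_epoch <= epoch && epoch < v.exit_epoch)
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    let registry = vec![
        Validator { activation_epoch: 0, exit_epoch: u64::MAX },
        Validator { activation_epoch: 5, exit_epoch: u64::MAX },
    ];
    assert_eq!(get_active_validator_indices(&registry, 1), vec![0]);
}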

View File

@ -21,7 +21,6 @@ db = { path = "../../db" }
parking_lot = "0.7" parking_lot = "0.7"
failure = "0.1" failure = "0.1"
failure_derive = "0.1" failure_derive = "0.1"
genesis = { path = "../../../eth2/genesis" }
hashing = { path = "../../../eth2/utils/hashing" } hashing = { path = "../../../eth2/utils/hashing" }
log = "0.4" log = "0.4"
env_logger = "0.6.0" env_logger = "0.6.0"

View File

@ -1,6 +1,7 @@
use super::ValidatorHarness; use super::ValidatorHarness;
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
pub use beacon_chain::{CheckPoint, Error as BeaconChainError}; pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
use bls::create_proof_of_possession;
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB, MemoryDB,
@ -13,7 +14,10 @@ use std::fs::File;
use std::io::prelude::*; use std::io::prelude::*;
use std::iter::FromIterator; use std::iter::FromIterator;
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot, Validator}; use types::{
BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, FreeAttestation, Hash256,
Keypair, Slot,
};
/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
/// to it. Each validator is provided a borrow to the beacon chain, where it may read /// to it. Each validator is provided a borrow to the beacon chain, where it may read
@ -35,16 +39,17 @@ impl BeaconChainHarness {
/// ///
/// - A keypair, `BlockProducer` and `Attester` for each validator. /// - A keypair, `BlockProducer` and `Attester` for each validator.
/// - A new BeaconChain struct where the given validators are in the genesis. /// - A new BeaconChain struct where the given validators are in the genesis.
pub fn new(mut spec: ChainSpec, validator_count: usize) -> Self { pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
let db = Arc::new(MemoryDB::open()); let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past).
let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
let latest_eth1_data = Eth1Data {
// Remove the validators present in the spec (if any). deposit_root: Hash256::zero(),
spec.initial_validators = Vec::with_capacity(validator_count); block_hash: Hash256::zero(),
spec.initial_balances = Vec::with_capacity(validator_count); };
debug!("Generating validator keypairs..."); debug!("Generating validator keypairs...");
@ -54,25 +59,25 @@ impl BeaconChainHarness {
.map(|_| Keypair::random()) .map(|_| Keypair::random())
.collect(); .collect();
debug!("Creating validator records..."); debug!("Creating validator deposits...");
spec.initial_validators = keypairs let initial_validator_deposits = keypairs
.par_iter() .par_iter()
.map(|keypair| Validator { .map(|keypair| Deposit {
pubkey: keypair.pk.clone(), branch: vec![], // branch verification is not specified.
activation_slot: Slot::new(0), index: 0, // index verification is not specified.
..std::default::Default::default() deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: create_proof_of_possession(&keypair),
},
},
}) })
.collect(); .collect();
debug!("Setting validator balances...");
spec.initial_balances = spec
.initial_validators
.par_iter()
.map(|_| 32_000_000_000) // 32 ETH
.collect();
debug!("Creating the BeaconChain..."); debug!("Creating the BeaconChain...");
// Create the Beacon Chain // Create the Beacon Chain
@ -81,6 +86,9 @@ impl BeaconChainHarness {
state_store.clone(), state_store.clone(),
block_store.clone(), block_store.clone(),
slot_clock, slot_clock,
genesis_time,
latest_eth1_data,
initial_validator_deposits,
spec.clone(), spec.clone(),
) )
.unwrap(), .unwrap(),
@ -136,7 +144,7 @@ impl BeaconChainHarness {
.beacon_chain .beacon_chain
.state .state
.read() .read()
.get_crosslink_committees_at_slot(present_slot, &self.spec) .get_crosslink_committees_at_slot(present_slot, false, &self.spec)
.unwrap() .unwrap()
.iter() .iter()
.fold(vec![], |mut acc, (committee, _slot)| { .fold(vec![], |mut acc, (committee, _slot)| {
@ -226,7 +234,7 @@ impl BeaconChainHarness {
} }
/// Write the output of `chain_dump` to a JSON file. /// Write the output of `chain_dump` to a JSON file.
pub fn dump_to_file(&self, filename: String, chain_dump: &Vec<CheckPoint>) { pub fn dump_to_file(&self, filename: String, chain_dump: &[CheckPoint]) {
let json = serde_json::to_string(chain_dump).unwrap(); let json = serde_json::to_string(chain_dump).unwrap();
let mut file = File::create(filename).unwrap(); let mut file = File::create(filename).unwrap();
file.write_all(json.as_bytes()) file.write_all(json.as_bytes())
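
The harness now seeds the chain with deposits of 32_000_000_000 Gwei. For reference, the ETH/Gwei arithmetic behind that constant:

// 1 ETH = 10^9 Gwei, so a 32 ETH deposit is 32 * 10^9 Gwei.
const GWEI_PER_ETH: u64 = 1_000_000_000;

fn main() {
    let deposit_amount_gwei = 32 * GWEI_PER_ETH;
    assert_eq!(deposit_amount_gwei, 32_000_000_000u64);
}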

View File

@ -11,7 +11,7 @@ use db::ClientDB;
use parking_lot::RwLock; use parking_lot::RwLock;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
use types::{AttestationData, BeaconBlock, FreeAttestation, PublicKey, Signature, Slot}; use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
// mod attester; // mod attester;
// mod producer; // mod producer;
@ -70,20 +70,6 @@ impl<T: ClientDB, U: SlotClock> AttesterBeaconNode for DirectBeaconNode<T, U> {
} }
impl<T: ClientDB, U: SlotClock> BeaconBlockNode for DirectBeaconNode<T, U> { impl<T: ClientDB, U: SlotClock> BeaconBlockNode for DirectBeaconNode<T, U> {
/// Requests the `proposer_nonce` from the `BeaconChain`.
fn proposer_nonce(&self, pubkey: &PublicKey) -> Result<u64, BeaconBlockNodeError> {
let validator_index = self
.beacon_chain
.validator_index(pubkey)
.ok_or_else(|| BeaconBlockNodeError::RemoteFailure("pubkey unknown.".to_string()))?;
self.beacon_chain
.proposer_slots(validator_index)
.ok_or_else(|| {
BeaconBlockNodeError::RemoteFailure("validator_index unknown.".to_string())
})
}
/// Requests a new `BeaconBlock` from the `BeaconChain`. /// Requests a new `BeaconBlock` from the `BeaconChain`.
fn produce_beacon_block( fn produce_beacon_block(
&self, &self,
@ -94,7 +80,7 @@ impl<T: ClientDB, U: SlotClock> BeaconBlockNode for DirectBeaconNode<T, U> {
.beacon_chain .beacon_chain
.produce_block(randao_reveal.clone()) .produce_block(randao_reveal.clone())
.ok_or_else(|| { .ok_or_else(|| {
BeaconBlockNodeError::RemoteFailure(format!("Did not produce block.")) BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string())
})?; })?;
if block.slot == slot { if block.slot == slot {
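
Several hunks in this commit replace `ok_or(expr)` with `ok_or_else(|| expr)` and `format!("literal")` with `"literal".to_string()`. The motivation is that `ok_or` builds (and allocates) its error argument even when the value is `Some`, while `ok_or_else` defers that work to a closure. A tiny demonstration:

fn main() {
    let present: Option<u32> = Some(1);

    // Eager: the String is constructed even though it is never used.
    let _eager: Result<u32, String> = present.ok_or(format!("missing value {}", 42));

    // Lazy: the closure only runs if the Option is None.
    let _lazy: Result<u32, String> = present.ok_or_else(|| format!("missing value {}", 42));

    let absent: Option<u32> = None;
    assert_eq!(
        absent.ok_or_else(|| "missing".to_string()),
        Err("missing".to_string())
    );
}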

View File

@ -60,7 +60,7 @@ impl<T: ClientDB, U: SlotClock> AttesterDutiesReader for DirectDuties<T, U> {
} }
Ok(Some(_)) => Ok(None), Ok(Some(_)) => Ok(None),
Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch), Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch),
Err(_) => panic!("Error when getting validator attestation shard."), Err(_) => unreachable!("Error when getting validator attestation shard."),
} }
} else { } else {
Err(AttesterDutiesReaderError::UnknownValidator) Err(AttesterDutiesReaderError::UnknownValidator)

View File

@ -1,6 +1,136 @@
mod direct_beacon_node; mod direct_beacon_node;
mod direct_duties; mod direct_duties;
mod local_signer; mod local_signer;
mod validator_harness;
pub use self::validator_harness::ValidatorHarness; use attester::PollOutcome as AttestationPollOutcome;
use attester::{Attester, Error as AttestationPollError};
use beacon_chain::BeaconChain;
use block_producer::PollOutcome as BlockPollOutcome;
use block_producer::{BlockProducer, Error as BlockPollError};
use db::MemoryDB;
use direct_beacon_node::DirectBeaconNode;
use direct_duties::DirectDuties;
use local_signer::LocalSigner;
use slot_clock::TestingSlotClock;
use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot};
#[derive(Debug, PartialEq)]
pub enum BlockProduceError {
DidNotProduce(BlockPollOutcome),
PollError(BlockPollError),
}
#[derive(Debug, PartialEq)]
pub enum AttestationProduceError {
DidNotProduce(AttestationPollOutcome),
PollError(AttestationPollError),
}
/// A `BlockProducer` and `Attester` which sign using a common keypair.
///
/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
/// testing that the core proposer and attester logic is functioning, and for supporting beacon
/// chain tests.
pub struct ValidatorHarness {
pub block_producer: BlockProducer<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock>,
DirectDuties<MemoryDB, TestingSlotClock>,
LocalSigner,
>,
pub attester: Attester<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock>,
DirectDuties<MemoryDB, TestingSlotClock>,
LocalSigner,
>,
pub spec: Arc<ChainSpec>,
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock>>,
pub keypair: Keypair,
pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock>>,
pub slot_clock: Arc<TestingSlotClock>,
pub signer: Arc<LocalSigner>,
}
impl ValidatorHarness {
/// Create a new ValidatorHarness that signs with the given keypair, operates per the given spec and connects to the
/// supplied beacon node.
///
/// A `BlockProducer` and an `Attester` are created.
pub fn new(
keypair: Keypair,
beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock>>,
spec: Arc<ChainSpec>,
) -> Self {
let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));
let signer = Arc::new(LocalSigner::new(keypair.clone()));
let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone()));
let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone()));
let block_producer = BlockProducer::new(
spec.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
let attester = Attester::new(
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
Self {
block_producer,
attester,
spec,
epoch_map,
keypair,
beacon_node,
slot_clock,
signer,
}
}
/// Run the `poll` function on the `BlockProducer` and produce a block.
///
/// An error is returned if the producer refuses to produce.
pub fn produce_block(&mut self) -> Result<BeaconBlock, BlockProduceError> {
// Using `DirectBeaconNode`, the validator will always return successfully if it tries to
// publish a block.
match self.block_producer.poll() {
Ok(BlockPollOutcome::BlockProduced(_)) => {}
Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)),
Err(error) => return Err(BlockProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_block()
.expect("Unable to obtain produced block."))
}
/// Run the `poll` function on the `Attester` and produce a `FreeAttestation`.
///
/// An error is returned if the attester refuses to attest.
pub fn produce_free_attestation(&mut self) -> Result<FreeAttestation, AttestationProduceError> {
match self.attester.poll() {
Ok(AttestationPollOutcome::AttestationProduced(_)) => {}
Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)),
Err(error) => return Err(AttestationProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_free_attestation()
.expect("Unable to obtain produced attestation."))
}
/// Set the validator's slot clock to the specified slot.
///
/// The validator's slot clock will always read this value until it is set to something else.
pub fn set_slot(&mut self, slot: Slot) {
self.slot_clock.set_slot(slot.as_u64())
}
}

View File

@ -1,133 +0,0 @@
use super::direct_beacon_node::DirectBeaconNode;
use super::direct_duties::DirectDuties;
use super::local_signer::LocalSigner;
use attester::PollOutcome as AttestationPollOutcome;
use attester::{Attester, Error as AttestationPollError};
use beacon_chain::BeaconChain;
use block_producer::PollOutcome as BlockPollOutcome;
use block_producer::{BlockProducer, Error as BlockPollError};
use db::MemoryDB;
use slot_clock::TestingSlotClock;
use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot};
#[derive(Debug, PartialEq)]
pub enum BlockProduceError {
DidNotProduce(BlockPollOutcome),
PollError(BlockPollError),
}
#[derive(Debug, PartialEq)]
pub enum AttestationProduceError {
DidNotProduce(AttestationPollOutcome),
PollError(AttestationPollError),
}
/// A `BlockProducer` and `Attester` which sign using a common keypair.
///
/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
/// testing that the core proposer and attester logic is functioning. Also for supporting beacon
/// chain tests.
pub struct ValidatorHarness {
pub block_producer: BlockProducer<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock>,
DirectDuties<MemoryDB, TestingSlotClock>,
LocalSigner,
>,
pub attester: Attester<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock>,
DirectDuties<MemoryDB, TestingSlotClock>,
LocalSigner,
>,
pub spec: Arc<ChainSpec>,
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock>>,
pub keypair: Keypair,
pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock>>,
pub slot_clock: Arc<TestingSlotClock>,
pub signer: Arc<LocalSigner>,
}
impl ValidatorHarness {
/// Create a new ValidatorHarness that signs with the given keypair, operates per the given spec and connects to the
/// supplied beacon node.
///
/// A `BlockProducer` and `Attester` is created..
pub fn new(
keypair: Keypair,
beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock>>,
spec: Arc<ChainSpec>,
) -> Self {
let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));
let signer = Arc::new(LocalSigner::new(keypair.clone()));
let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone()));
let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone()));
let block_producer = BlockProducer::new(
spec.clone(),
keypair.pk.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
let attester = Attester::new(
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
Self {
block_producer,
attester,
spec,
epoch_map,
keypair,
beacon_node,
slot_clock,
signer,
}
}
/// Run the `poll` function on the `BlockProducer` and produce a block.
///
/// An error is returned if the producer refuses to produce.
pub fn produce_block(&mut self) -> Result<BeaconBlock, BlockProduceError> {
// Using `DirectBeaconNode`, the validator will always return sucessufully if it tries to
// publish a block.
match self.block_producer.poll() {
Ok(BlockPollOutcome::BlockProduced(_)) => {}
Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)),
Err(error) => return Err(BlockProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_block()
.expect("Unable to obtain produced block."))
}
/// Run the `poll` function on the `Attester` and produce a `FreeAttestation`.
///
/// An error is returned if the attester refuses to attest.
pub fn produce_free_attestation(&mut self) -> Result<FreeAttestation, AttestationProduceError> {
match self.attester.poll() {
Ok(AttestationPollOutcome::AttestationProduced(_)) => {}
Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)),
Err(error) => return Err(AttestationProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_free_attestation()
.expect("Unable to obtain produced attestation."))
}
/// Set the validators slot clock to the specified slot.
///
/// The validators slot clock will always read this value until it is set to something else.
pub fn set_slot(&mut self, slot: Slot) {
self.slot_clock.set_slot(slot.as_u64())
}
}

View File

@ -8,6 +8,7 @@ use std::path::PathBuf;
use crate::config::LighthouseConfig; use crate::config::LighthouseConfig;
use crate::rpc::start_server; use crate::rpc::start_server;
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use bls::create_proof_of_possession;
use clap::{App, Arg}; use clap::{App, Arg};
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
@ -16,7 +17,7 @@ use db::{
use slog::{error, info, o, Drain}; use slog::{error, info, o, Drain};
use slot_clock::SystemTimeSlotClock; use slot_clock::SystemTimeSlotClock;
use std::sync::Arc; use std::sync::Arc;
use types::ChainSpec; use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair};
fn main() { fn main() {
let decorator = slog_term::TermDecorator::new().build(); let decorator = slog_term::TermDecorator::new().build();
@ -75,13 +76,51 @@ fn main() {
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
// Slot clock // Slot clock
let slot_clock = SystemTimeSlotClock::new(spec.genesis_time, spec.slot_duration) let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past).
let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
.expect("Unable to load SystemTimeSlotClock"); .expect("Unable to load SystemTimeSlotClock");
/*
* Generate some random data to start a chain with.
*
* This will need to be replaced for production usage.
*/
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let keypairs: Vec<Keypair> = (0..10)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not specified.
index: 0, // index verification is not specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: create_proof_of_possession(&keypair),
},
},
})
.collect();
// Genesis chain // Genesis chain
// TODO: persist chain to storage. let _chain_result = BeaconChain::genesis(
let _chain_result = state_store.clone(),
BeaconChain::genesis(state_store.clone(), block_store.clone(), slot_clock, spec); block_store.clone(),
slot_clock,
genesis_time,
latest_eth1_data,
initial_validator_deposits,
spec,
);
let _server = start_server(log.clone()); let _server = start_server(log.clone());

View File

@ -25,7 +25,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance {
// TODO: build a legit block. // TODO: build a legit block.
let mut block = BeaconBlockProto::new(); let mut block = BeaconBlockProto::new();
block.set_slot(req.get_slot()); block.set_slot(req.get_slot());
block.set_block_root("cats".as_bytes().to_vec()); block.set_block_root(b"cats".to_vec());
let mut resp = ProduceBeaconBlockResponse::new(); let mut resp = ProduceBeaconBlockResponse::new();
resp.set_block(block); resp.set_block(block);

View File

@ -4,7 +4,7 @@ mod traits;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz::ssz_encode; use ssz::ssz_encode;
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, PublicKey, Slot}; use types::{BeaconBlock, ChainSpec, Slot};
pub use self::traits::{ pub use self::traits::{
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
@ -48,7 +48,6 @@ pub enum Error {
/// Relies upon an external service to keep the `EpochDutiesMap` updated. /// Relies upon an external service to keep the `EpochDutiesMap` updated.
pub struct BlockProducer<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> { pub struct BlockProducer<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub last_processed_slot: Option<Slot>, pub last_processed_slot: Option<Slot>,
pubkey: PublicKey,
spec: Arc<ChainSpec>, spec: Arc<ChainSpec>,
epoch_map: Arc<V>, epoch_map: Arc<V>,
slot_clock: Arc<T>, slot_clock: Arc<T>,
@ -60,7 +59,6 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
/// Returns a new instance where `last_processed_slot == 0`. /// Returns a new instance where `last_processed_slot == 0`.
pub fn new( pub fn new(
spec: Arc<ChainSpec>, spec: Arc<ChainSpec>,
pubkey: PublicKey,
epoch_map: Arc<V>, epoch_map: Arc<V>,
slot_clock: Arc<T>, slot_clock: Arc<T>,
beacon_node: Arc<U>, beacon_node: Arc<U>,
@ -68,7 +66,6 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
) -> Self { ) -> Self {
Self { Self {
last_processed_slot: None, last_processed_slot: None,
pubkey,
spec, spec,
epoch_map, epoch_map,
slot_clock, slot_clock,
@ -134,10 +131,8 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
/// slashing. /// slashing.
fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> { fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
let randao_reveal = { let randao_reveal = {
let producer_nonce = self.beacon_node.proposer_nonce(&self.pubkey)?; // TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
let message = ssz_encode(&slot.epoch(self.spec.epoch_length));
// TODO: add domain, etc to this message.
let message = ssz_encode(&producer_nonce);
match self.signer.sign_randao_reveal(&message) { match self.signer.sign_randao_reveal(&message) {
None => return Ok(PollOutcome::SignerRejection(slot)), None => return Ok(PollOutcome::SignerRejection(slot)),
@ -240,11 +235,9 @@ mod tests {
let produce_epoch = produce_slot.epoch(spec.epoch_length); let produce_epoch = produce_slot.epoch(spec.epoch_length);
epoch_map.map.insert(produce_epoch, produce_slot); epoch_map.map.insert(produce_epoch, produce_slot);
let epoch_map = Arc::new(epoch_map); let epoch_map = Arc::new(epoch_map);
let keypair = Keypair::random();
let mut block_producer = BlockProducer::new( let mut block_producer = BlockProducer::new(
spec.clone(), spec.clone(),
keypair.pk.clone(),
epoch_map.clone(), epoch_map.clone(),
slot_clock.clone(), slot_clock.clone(),
beacon_node.clone(), beacon_node.clone(),
@ -254,7 +247,6 @@ mod tests {
// Configure responses from the BeaconNode. // Configure responses from the BeaconNode.
beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng)))); beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng))));
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock)); beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock));
beacon_node.set_next_nonce_result(Ok(0));
// One slot before production slot... // One slot before production slot...
slot_clock.set_slot(produce_slot.as_u64() - 1); slot_clock.set_slot(produce_slot.as_u64() - 1);
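
The randao reveal above now signs the SSZ encoding of the slot's epoch rather than a proposer nonce. A hedged sketch of deriving that message, assuming SSZ serialises a `u64` as 8 little-endian bytes (the real code calls `ssz::ssz_encode` on the `Epoch` newtype):

// Derive the epoch from the slot and serialise it as the message to sign.
fn randao_message(slot: u64, epoch_length: u64) -> Vec<u8> {
    let epoch = slot / epoch_length;
    // Assumption: SSZ encodes a u64 as its 8-byte little-endian representation.
    epoch.to_le_bytes().to_vec()
}

fn main() {
    let message = randao_message(130, 64); // slot 130 with an assumed epoch length of 64
    assert_eq!(message, 2u64.to_le_bytes().to_vec());
    // The bytes would then be passed to something like `signer.sign_randao_reveal(&message)`.
}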

View File

@ -1,17 +1,13 @@
use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome}; use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
use std::sync::RwLock; use std::sync::RwLock;
use types::{BeaconBlock, PublicKey, Signature, Slot}; use types::{BeaconBlock, Signature, Slot};
type NonceResult = Result<u64, BeaconNodeError>;
type ProduceResult = Result<Option<BeaconBlock>, BeaconNodeError>; type ProduceResult = Result<Option<BeaconBlock>, BeaconNodeError>;
type PublishResult = Result<PublishOutcome, BeaconNodeError>; type PublishResult = Result<PublishOutcome, BeaconNodeError>;
/// A test-only struct used to simulate a Beacon Node. /// A test-only struct used to simulate a Beacon Node.
#[derive(Default)] #[derive(Default)]
pub struct SimulatedBeaconNode { pub struct SimulatedBeaconNode {
pub nonce_input: RwLock<Option<PublicKey>>,
pub nonce_result: RwLock<Option<NonceResult>>,
pub produce_input: RwLock<Option<(Slot, Signature)>>, pub produce_input: RwLock<Option<(Slot, Signature)>>,
pub produce_result: RwLock<Option<ProduceResult>>, pub produce_result: RwLock<Option<ProduceResult>>,
@ -20,11 +16,6 @@ pub struct SimulatedBeaconNode {
} }
impl SimulatedBeaconNode { impl SimulatedBeaconNode {
/// Set the result to be returned when `produce_beacon_block` is called.
pub fn set_next_nonce_result(&self, result: NonceResult) {
*self.nonce_result.write().unwrap() = Some(result);
}
/// Set the result to be returned when `produce_beacon_block` is called. /// Set the result to be returned when `produce_beacon_block` is called.
pub fn set_next_produce_result(&self, result: ProduceResult) { pub fn set_next_produce_result(&self, result: ProduceResult) {
*self.produce_result.write().unwrap() = Some(result); *self.produce_result.write().unwrap() = Some(result);
@ -37,14 +28,6 @@ impl SimulatedBeaconNode {
} }
impl BeaconNode for SimulatedBeaconNode { impl BeaconNode for SimulatedBeaconNode {
fn proposer_nonce(&self, pubkey: &PublicKey) -> NonceResult {
*self.nonce_input.write().unwrap() = Some(pubkey.clone());
match *self.nonce_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("SimulatedBeaconNode: nonce_result == None"),
}
}
/// Returns the value specified by the `set_next_produce_result`. /// Returns the value specified by the `set_next_produce_result`.
fn produce_beacon_block(&self, slot: Slot, randao_reveal: &Signature) -> ProduceResult { fn produce_beacon_block(&self, slot: Slot, randao_reveal: &Signature) -> ProduceResult {
*self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone())); *self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone()));

View File

@ -1,4 +1,4 @@
use types::{BeaconBlock, PublicKey, Signature, Slot}; use types::{BeaconBlock, Signature, Slot};
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError { pub enum BeaconNodeError {
@ -14,9 +14,6 @@ pub enum PublishOutcome {
/// Defines the methods required to produce and publish blocks on a Beacon Node. /// Defines the methods required to produce and publish blocks on a Beacon Node.
pub trait BeaconNode: Send + Sync { pub trait BeaconNode: Send + Sync {
/// Requests the proposer nonce (presently named `proposer_slots`).
fn proposer_nonce(&self, pubkey: &PublicKey) -> Result<u64, BeaconNodeError>;
/// Request that the node produces a block. /// Request that the node produces a block.
/// ///
/// Returns Ok(None) if the Beacon Node is unable to produce at the given slot. /// Returns Ok(None) if the Beacon Node is unable to produce at the given slot.

View File

@ -1,11 +0,0 @@
[package]
name = "genesis"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
bls = { path = "../utils/bls" }
ssz = { path = "../utils/ssz" }
types = { path = "../types" }
validator_induction = { path = "../validator_induction" }

View File

@ -1,93 +0,0 @@
use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256};
/// Generate a genesis BeaconBlock.
pub fn genesis_beacon_block(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock {
BeaconBlock {
slot: spec.genesis_slot,
parent_root: spec.zero_hash,
state_root,
randao_reveal: spec.empty_signature.clone(),
eth1_data: Eth1Data {
deposit_root: spec.zero_hash,
block_hash: spec.zero_hash,
},
signature: spec.empty_signature.clone(),
body: BeaconBlockBody {
proposer_slashings: vec![],
casper_slashings: vec![],
attestations: vec![],
custody_reseeds: vec![],
custody_challenges: vec![],
custody_responses: vec![],
deposits: vec![],
exits: vec![],
},
}
}
#[cfg(test)]
mod tests {
use super::*;
use bls::Signature;
#[test]
fn test_state_root() {
let spec = ChainSpec::foundation();
let state_root = Hash256::from("cats".as_bytes());
let block = genesis_beacon_block(state_root, &spec);
assert_eq!(block.state_root, state_root);
}
#[test]
fn test_zero_items() {
let spec = ChainSpec::foundation();
let state_root = Hash256::zero();
let genesis_block = genesis_beacon_block(state_root, &spec);
assert!(genesis_block.slot == 0);
assert!(genesis_block.parent_root.is_zero());
assert_eq!(genesis_block.randao_reveal, Signature::empty_signature());
assert!(genesis_block.eth1_data.deposit_root.is_zero());
assert!(genesis_block.eth1_data.block_hash.is_zero());
}
#[test]
fn test_beacon_body() {
let spec = ChainSpec::foundation();
let state_root = Hash256::zero();
let genesis_block = genesis_beacon_block(state_root, &spec);
// Custody items are not being implemented until phase 1 so tests to be added later
assert!(genesis_block.body.proposer_slashings.is_empty());
assert!(genesis_block.body.casper_slashings.is_empty());
assert!(genesis_block.body.attestations.is_empty());
assert!(genesis_block.body.deposits.is_empty());
assert!(genesis_block.body.exits.is_empty());
}
#[test]
fn test_signature() {
let spec = ChainSpec::foundation();
let state_root = Hash256::zero();
let genesis_block = genesis_beacon_block(state_root, &spec);
// Signature should consist of [bytes48(0), bytes48(0)]
// Note this is implemented using Apache Milagro BLS which requires one extra byte -> 97bytes
let raw_sig = genesis_block.signature.as_raw();
let raw_sig_bytes = raw_sig.as_bytes();
for item in raw_sig_bytes.iter() {
assert!(*item == 0);
}
assert_eq!(genesis_block.signature, Signature::empty_signature());
}
}

View File

@ -1,194 +0,0 @@
use types::{BeaconState, ChainSpec, Crosslink, Fork};
pub fn genesis_beacon_state(spec: &ChainSpec) -> BeaconState {
let initial_crosslink = Crosslink {
slot: spec.genesis_slot,
shard_block_root: spec.zero_hash,
};
BeaconState {
/*
* Misc
*/
slot: spec.genesis_slot,
genesis_time: spec.genesis_time,
fork_data: Fork {
pre_fork_version: spec.genesis_fork_version,
post_fork_version: spec.genesis_fork_version,
fork_slot: spec.genesis_slot,
},
/*
* Validator registry
*/
validator_registry: spec.initial_validators.clone(),
validator_balances: spec.initial_balances.clone(),
validator_registry_update_slot: spec.genesis_slot,
validator_registry_exit_count: 0,
validator_registry_delta_chain_tip: spec.zero_hash,
/*
* Randomness and committees
*/
latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize],
latest_vdf_outputs: vec![
spec.zero_hash;
(spec.latest_randao_mixes_length / spec.epoch_length) as usize
],
previous_epoch_start_shard: spec.genesis_start_shard,
current_epoch_start_shard: spec.genesis_start_shard,
previous_epoch_calculation_slot: spec.genesis_slot,
current_epoch_calculation_slot: spec.genesis_slot,
previous_epoch_seed: spec.zero_hash,
current_epoch_seed: spec.zero_hash,
/*
* Custody challenges
*/
custody_challenges: vec![],
/*
* Finality
*/
previous_justified_slot: spec.genesis_slot,
justified_slot: spec.genesis_slot,
justification_bitfield: 0,
finalized_slot: spec.genesis_slot,
/*
* Recent state
*/
latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize],
latest_block_roots: vec![spec.zero_hash; spec.latest_block_roots_length as usize],
latest_penalized_balances: vec![0; spec.latest_penalized_exit_length as usize],
latest_attestations: vec![],
batched_block_roots: vec![],
/*
* PoW receipt root
*/
latest_eth1_data: spec.intial_eth1_data.clone(),
eth1_data_votes: vec![],
}
}
#[cfg(test)]
mod tests {
use super::*;
use types::Hash256;
#[test]
fn test_genesis_state() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
assert_eq!(
state.validator_registry.len(),
spec.initial_validators.len()
);
}
#[test]
fn test_genesis_state_misc() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
assert_eq!(state.slot, 0);
assert_eq!(state.genesis_time, spec.genesis_time);
assert_eq!(state.fork_data.pre_fork_version, 0);
assert_eq!(state.fork_data.post_fork_version, 0);
assert_eq!(state.fork_data.fork_slot, 0);
}
#[test]
fn test_genesis_state_validators() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
assert_eq!(state.validator_registry, spec.initial_validators);
assert_eq!(state.validator_balances, spec.initial_balances);
assert!(state.validator_registry_update_slot == 0);
assert!(state.validator_registry_exit_count == 0);
assert_eq!(state.validator_registry_delta_chain_tip, Hash256::zero());
}
#[test]
fn test_genesis_state_randomness_committees() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
// Array of size 8,192 each being zero_hash
assert_eq!(state.latest_randao_mixes.len(), 8_192);
for item in state.latest_randao_mixes.iter() {
assert_eq!(*item, Hash256::zero());
}
// Array of size 8,192 each being a zero hash
assert_eq!(state.latest_vdf_outputs.len(), (8_192 / 64));
for item in state.latest_vdf_outputs.iter() {
assert_eq!(*item, Hash256::zero());
}
// TODO: Check shard and committee shuffling requires solving issue:
// https://github.com/sigp/lighthouse/issues/151
// initial_shuffling = get_shuffling(Hash256::zero(), &state.validator_registry, 0, 0)
// initial_shuffling = initial_shuffling.append(initial_shuffling.clone());
}
// Custody not implemented until Phase 1
#[test]
fn test_genesis_state_custody() {}
#[test]
fn test_genesis_state_finanilty() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
assert_eq!(state.previous_justified_slot, 0);
assert_eq!(state.justified_slot, 0);
assert_eq!(state.justification_bitfield, 0);
assert_eq!(state.finalized_slot, 0);
}
#[test]
fn test_genesis_state_recent_state() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
// Test latest_crosslinks
assert_eq!(state.latest_crosslinks.len(), 1_024);
for link in state.latest_crosslinks.iter() {
assert_eq!(link.slot, 0);
assert_eq!(link.shard_block_root, Hash256::zero());
}
// Test latest_block_roots
assert_eq!(state.latest_block_roots.len(), 8_192);
for block in state.latest_block_roots.iter() {
assert_eq!(*block, Hash256::zero());
}
// Test latest_penalized_balances
assert_eq!(state.latest_penalized_balances.len(), 8_192);
for item in state.latest_penalized_balances.iter() {
assert!(*item == 0);
}
// Test latest_attestations
assert!(state.latest_attestations.is_empty());
// batched_block_roots
assert!(state.batched_block_roots.is_empty());
}
#[test]
fn test_genesis_state_deposit_root() {
let spec = ChainSpec::foundation();
let state = genesis_beacon_state(&spec);
assert_eq!(&state.latest_eth1_data, &spec.intial_eth1_data);
assert!(state.eth1_data_votes.is_empty());
}
}

View File

@ -1,5 +0,0 @@
mod beacon_block;
mod beacon_state;
pub use crate::beacon_block::genesis_beacon_block;
pub use crate::beacon_state::genesis_beacon_state;

View File

@ -1,10 +1,13 @@
[package] [package]
name = "validator_induction" name = "state_processing"
version = "0.1.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
bls = { path = "../utils/bls" }
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
integer-sqrt = "0.1"
log = "0.4"
ssz = { path = "../utils/ssz" }
types = { path = "../types" } types = { path = "../types" }
rayon = "1.0"

View File

@ -0,0 +1,403 @@
use crate::SlotProcessingError;
use hashing::hash;
use log::debug;
use ssz::{ssz_encode, TreeHash};
use types::{
beacon_state::{AttestationValidationError, CommitteesError},
AggregatePublicKey, Attestation, BeaconBlock, BeaconState, ChainSpec, Crosslink, Epoch, Exit,
Fork, Hash256, PendingAttestation, PublicKey, Signature,
};
// TODO: define elsewhere.
const DOMAIN_PROPOSAL: u64 = 2;
const DOMAIN_EXIT: u64 = 3;
const DOMAIN_RANDAO: u64 = 4;
const PHASE_0_CUSTODY_BIT: bool = false;
const DOMAIN_ATTESTATION: u64 = 1;
#[derive(Debug, PartialEq)]
pub enum Error {
DBError(String),
StateAlreadyTransitioned,
PresentSlotIsNone,
UnableToDecodeBlock,
MissingParentState(Hash256),
InvalidParentState(Hash256),
MissingBeaconBlock(Hash256),
InvalidBeaconBlock(Hash256),
MissingParentBlock(Hash256),
NoBlockProducer,
StateSlotMismatch,
BadBlockSignature,
BadRandaoSignature,
MaxProposerSlashingsExceeded,
BadProposerSlashing,
MaxAttestationsExceeded,
InvalidAttestation(AttestationValidationError),
NoBlockRoot,
MaxDepositsExceeded,
MaxExitsExceeded,
BadExit,
BadCustodyReseeds,
BadCustodyChallenges,
BadCustodyResponses,
CommitteesError(CommitteesError),
SlotProcessingError(SlotProcessingError),
}
macro_rules! ensure {
($condition: expr, $result: expr) => {
if !$condition {
return Err($result);
}
};
}
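// Illustrative usage (not part of the diff): a call such as
// `ensure!(block.slot == state.slot, Error::StateSlotMismatch)` expands to an early
// `return Err(Error::StateSlotMismatch)` whenever the condition is false, mirroring a
// spec-style assert without panicking.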
pub trait BlockProcessable {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>;
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error>;
}
impl BlockProcessable for BeaconState {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, true, spec)
}
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, false, spec)
}
}
fn per_block_processing_signature_optional(
state: &mut BeaconState,
block: &BeaconBlock,
verify_block_signature: bool,
spec: &ChainSpec,
) -> Result<(), Error> {
ensure!(block.slot == state.slot, Error::StateSlotMismatch);
/*
* Proposer Signature
*/
let block_proposer_index = state
.get_beacon_proposer_index(block.slot, spec)
.map_err(|_| Error::NoBlockProducer)?;
let block_proposer = &state.validator_registry[block_proposer_index];
if verify_block_signature {
ensure!(
bls_verify(
&block_proposer.pubkey,
&block.proposal_root(spec)[..],
&block.signature,
get_domain(&state.fork, state.current_epoch(spec), DOMAIN_PROPOSAL)
),
Error::BadBlockSignature
);
}
/*
* RANDAO
*/
ensure!(
bls_verify(
&block_proposer.pubkey,
&ssz_encode(&state.current_epoch(spec)),
&block.randao_reveal,
get_domain(&state.fork, state.current_epoch(spec), DOMAIN_RANDAO)
),
Error::BadRandaoSignature
);
// TODO: check this is correct.
let new_mix = {
let mut mix = state.latest_randao_mixes
[state.slot.as_usize() % spec.latest_randao_mixes_length]
.to_vec();
mix.append(&mut ssz_encode(&block.randao_reveal));
Hash256::from(&hash(&mix)[..])
};
state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix;
/*
* Eth1 data
*/
// TODO: Eth1 data processing.
/*
* Proposer slashings
*/
ensure!(
block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
Error::MaxProposerSlashingsExceeded
);
for proposer_slashing in &block.body.proposer_slashings {
let proposer = state
.validator_registry
.get(proposer_slashing.proposer_index as usize)
.ok_or(Error::BadProposerSlashing)?;
ensure!(
proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.block_root
!= proposer_slashing.proposal_data_2.block_root,
Error::BadProposerSlashing
);
ensure!(
proposer.penalized_epoch > state.current_epoch(spec),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_1.hash_tree_root(),
&proposer_slashing.proposal_signature_1,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_1
.slot
.epoch(spec.epoch_length),
DOMAIN_PROPOSAL
)
),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_2.hash_tree_root(),
&proposer_slashing.proposal_signature_2,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_2
.slot
.epoch(spec.epoch_length),
DOMAIN_PROPOSAL
)
),
Error::BadProposerSlashing
);
state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?;
}
/*
* Attestations
*/
ensure!(
block.body.attestations.len() as u64 <= spec.max_attestations,
Error::MaxAttestationsExceeded
);
for attestation in &block.body.attestations {
validate_attestation(&state, attestation, spec)?;
let pending_attestation = PendingAttestation {
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot,
};
state.latest_attestations.push(pending_attestation);
}
debug!(
"{} attestations verified & processed.",
block.body.attestations.len()
);
/*
* Deposits
*/
ensure!(
block.body.deposits.len() as u64 <= spec.max_deposits,
Error::MaxDepositsExceeded
);
// TODO: process deposits.
/*
* Exits
*/
ensure!(
block.body.exits.len() as u64 <= spec.max_exits,
Error::MaxExitsExceeded
);
for exit in &block.body.exits {
let validator = state
.validator_registry
.get(exit.validator_index as usize)
.ok_or(Error::BadExit)?;
ensure!(
validator.exit_epoch
> state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec),
Error::BadExit
);
ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit);
let exit_message = {
let exit_struct = Exit {
epoch: exit.epoch,
validator_index: exit.validator_index,
signature: spec.empty_signature.clone(),
};
exit_struct.hash_tree_root()
};
ensure!(
bls_verify(
&validator.pubkey,
&exit_message,
&exit.signature,
get_domain(&state.fork, exit.epoch, DOMAIN_EXIT)
),
Error::BadExit
);
state.initiate_validator_exit(exit.validator_index as usize);
}
debug!("State transition complete.");
Ok(())
}
pub fn validate_attestation(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, true)
}
pub fn validate_attestation_without_signature(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, false)
}
fn validate_attestation_signature_optional(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
verify_signature: bool,
) -> Result<(), AttestationValidationError> {
ensure!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
AttestationValidationError::IncludedTooEarly
);
ensure!(
attestation.data.slot + spec.epoch_length >= state.slot,
AttestationValidationError::IncludedTooLate
);
if attestation.data.slot >= state.current_epoch_start_slot(spec) {
ensure!(
attestation.data.justified_epoch == state.justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
} else {
ensure!(
attestation.data.justified_epoch == state.previous_justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
}
ensure!(
attestation.data.justified_block_root
== *state
.get_block_root(
attestation
.data
.justified_epoch
.start_slot(spec.epoch_length),
&spec
)
.ok_or(AttestationValidationError::NoBlockRoot)?,
AttestationValidationError::WrongJustifiedRoot
);
let potential_crosslink = Crosslink {
shard_block_root: attestation.data.shard_block_root,
epoch: attestation.data.slot.epoch(spec.epoch_length),
};
ensure!(
(attestation.data.latest_crosslink
== state.latest_crosslinks[attestation.data.shard as usize])
| (attestation.data.latest_crosslink == potential_crosslink),
AttestationValidationError::BadLatestCrosslinkRoot
);
if verify_signature {
let participants = state.get_attestation_participants(
&attestation.data,
&attestation.aggregation_bitfield,
spec,
)?;
let mut group_public_key = AggregatePublicKey::new();
for participant in participants {
group_public_key.add(
state.validator_registry[participant as usize]
.pubkey
.as_raw(),
)
}
ensure!(
attestation.verify_signature(
&group_public_key,
PHASE_0_CUSTODY_BIT,
get_domain(
&state.fork,
attestation.data.slot.epoch(spec.epoch_length),
DOMAIN_ATTESTATION,
)
),
AttestationValidationError::BadSignature
);
}
ensure!(
attestation.data.shard_block_root == spec.zero_hash,
AttestationValidationError::ShardBlockRootNotZero
);
Ok(())
}
fn get_domain(_fork: &Fork, _epoch: Epoch, _domain_type: u64) -> u64 {
// TODO: stubbed out.
0
}
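// A plausible shape for the un-stubbed version, sketched as an assumption from the spec's
// `get_domain` and the `Fork { previous_version, current_version, epoch }` fields defined in
// this crate (not what this commit implements):
//
// fn get_domain(fork: &Fork, epoch: Epoch, domain_type: u64) -> u64 {
//     let fork_version = if epoch < fork.epoch {
//         fork.previous_version
//     } else {
//         fork.current_version
//     };
//     (fork_version << 32) + domain_type
// }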
fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool {
// TODO: add domain
signature.verify(message, pubkey)
}
impl From<AttestationValidationError> for Error {
fn from(e: AttestationValidationError) -> Error {
Error::InvalidAttestation(e)
}
}
impl From<CommitteesError> for Error {
fn from(e: CommitteesError) -> Error {
Error::CommitteesError(e)
}
}
impl From<SlotProcessingError> for Error {
fn from(e: SlotProcessingError) -> Error {
Error::SlotProcessingError(e)
}
}

View File

@ -0,0 +1,716 @@
use integer_sqrt::IntegerSquareRoot;
use log::{debug, trace};
use rayon::prelude::*;
use ssz::TreeHash;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use types::{
beacon_state::{AttestationParticipantsError, CommitteesError, InclusionError},
validator_registry::get_active_validator_indices,
BeaconState, ChainSpec, Crosslink, Epoch, Hash256, PendingAttestation,
};
macro_rules! safe_add_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_add($b);
};
}
macro_rules! safe_sub_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_sub($b);
};
}
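// Both macros use saturating arithmetic so balance bookkeeping can never overflow or underflow;
// e.g. `safe_sub_assign!(self.validator_balances[index], base_reward)` clamps the balance at
// zero instead of panicking on underflow in debug builds.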
#[derive(Debug, PartialEq)]
pub enum Error {
UnableToDetermineProducer,
NoBlockRoots,
BaseRewardQuotientIsZero,
NoRandaoSeed,
CommitteesError(CommitteesError),
AttestationParticipantsError(AttestationParticipantsError),
InclusionError(InclusionError),
WinningRootError(WinningRootError),
}
#[derive(Debug, PartialEq)]
pub enum WinningRootError {
NoWinningRoot,
AttestationParticipantsError(AttestationParticipantsError),
}
#[derive(Clone)]
pub struct WinningRoot {
pub shard_block_root: Hash256,
pub attesting_validator_indices: Vec<usize>,
pub total_balance: u64,
pub total_attesting_balance: u64,
}
pub trait EpochProcessable {
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>;
}
impl EpochProcessable for BeaconState {
// Cyclomatic complexity is ignored. It would be ideal to split this function apart; however, it
// remains monolithic to allow for easier spec updates. Once the spec is more stable we can
// optimise.
#[allow(clippy::cyclomatic_complexity)]
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = self.current_epoch(spec);
let previous_epoch = self.previous_epoch(spec);
let next_epoch = self.next_epoch(spec);
debug!(
"Starting per-epoch processing on epoch {}...",
self.current_epoch(spec)
);
/*
* Validators attesting during the current epoch.
*/
let active_validator_indices = get_active_validator_indices(
&self.validator_registry,
self.slot.epoch(spec.epoch_length),
);
let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec);
trace!(
"{} validators with a total balance of {} wei.",
active_validator_indices.len(),
current_total_balance
);
let current_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.current_epoch(spec)
})
.collect();
trace!(
"Current epoch attestations: {}",
current_epoch_attestations.len()
);
let current_epoch_boundary_attestations: Vec<&PendingAttestation> =
current_epoch_attestations
.par_iter()
.filter(
|a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) {
Some(block_root) => {
(a.data.epoch_boundary_root == *block_root)
&& (a.data.justified_epoch == self.justified_epoch)
}
None => unreachable!(),
},
)
.cloned()
.collect();
let current_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&current_epoch_boundary_attestations[..], spec)?;
let current_epoch_boundary_attesting_balance =
self.get_total_balance(&current_epoch_boundary_attester_indices[..], spec);
trace!(
"Current epoch boundary attesters: {}",
current_epoch_boundary_attester_indices.len()
);
/*
* Validators attesting during the previous epoch
*/
/*
* Validators that made an attestation during the previous epoch
*/
let previous_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
// TODO: ensure these saturating subs are correct.
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.previous_epoch(spec)
})
.collect();
debug!(
"previous epoch attestations: {}",
previous_epoch_attestations.len()
);
let previous_epoch_attester_indices =
self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?;
let previous_total_balance =
self.get_total_balance(&previous_epoch_attester_indices[..], spec);
/*
* Validators targeting the previously justified epoch
*/
let previous_epoch_justified_attestations: Vec<&PendingAttestation> = {
let mut a: Vec<&PendingAttestation> = current_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
let mut b: Vec<&PendingAttestation> = previous_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
a.append(&mut b);
a
};
let previous_epoch_justified_attester_indices = self
.get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?;
let previous_epoch_justified_attesting_balance =
self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec);
/*
* Validators justifying the epoch boundary block at the start of the previous epoch
*/
let previous_epoch_boundary_attestations: Vec<&PendingAttestation> =
previous_epoch_justified_attestations
.iter()
.filter(
|a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) {
Some(block_root) => a.data.epoch_boundary_root == *block_root,
None => unreachable!(),
},
)
.cloned()
.collect();
let previous_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?;
let previous_epoch_boundary_attesting_balance =
self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec);
/*
* Validators attesting to the expected beacon chain head during the previous epoch.
*/
let previous_epoch_head_attestations: Vec<&PendingAttestation> =
previous_epoch_attestations
.iter()
.filter(|a| match self.get_block_root(a.data.slot, spec) {
Some(block_root) => a.data.beacon_block_root == *block_root,
None => unreachable!(),
})
.cloned()
.collect();
let previous_epoch_head_attester_indices =
self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?;
let previous_epoch_head_attesting_balance =
self.get_total_balance(&previous_epoch_head_attester_indices[..], spec);
debug!(
"previous_epoch_head_attester_balance of {} wei.",
previous_epoch_head_attesting_balance
);
/*
* Eth1 Data
*/
if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 {
for eth1_data_vote in &self.eth1_data_votes {
if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period {
self.latest_eth1_data = eth1_data_vote.eth1_data.clone();
}
}
self.eth1_data_votes = vec![];
}
/*
* Justification
*/
let mut new_justified_epoch = self.justified_epoch;
self.justification_bitfield <<= 1;
// If > 2/3 of the total balance attested to the previous epoch boundary
//
// - Set the 2nd bit of the bitfield.
// - Set the previous epoch to be justified.
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 2;
new_justified_epoch = previous_epoch;
trace!(">= 2/3 voted for previous epoch boundary");
}
// If > 2/3 of the total balance attested to the current epoch boundary
//
// - Set the 1st bit of the bitfield.
// - Set the current epoch to be justified.
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 1;
new_justified_epoch = current_epoch;
trace!(">= 2/3 voted for current epoch boundary");
}
// If:
//
// - All three epochs prior to this epoch have been justified.
// - The previously justified epoch was three epochs ago.
//
// Then, set the finalized epoch to be three epochs ago.
if ((self.justification_bitfield >> 1) % 8 == 0b111)
& (self.previous_justified_epoch == previous_epoch - 2)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 3 was finalized (1st condition).");
}
// If:
//
// - Both two epochs prior to this epoch have been justified.
// - The previous justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if ((self.justification_bitfield >> 1) % 4 == 0b11)
& (self.previous_justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 2 was finalized (2nd condition).");
}
// If:
//
// - This epoch and the two prior have been justified.
// - The presently justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 2 was finalized (3rd condition).");
}
// If:
//
// - This epoch and the epoch prior to it have been justified.
// - The presently justified epoch was the previous epoch.
//
// Then, set the finalized epoch to be the previous epoch.
if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) {
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 1 was finalized (4th condition).");
}
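// Reading the bitfield after the shift above: bit 0 records "the current epoch was justified",
// bit 1 "the previous epoch was justified", and so on, which is why `bitfield % 8 == 0b111`
// means this epoch and the two before it were all justified.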
self.previous_justified_epoch = self.justified_epoch;
self.justified_epoch = new_justified_epoch;
debug!(
"Finalized epoch {}, justified epoch {}.",
self.finalized_epoch, self.justified_epoch
);
/*
* Crosslinks
*/
// Cached for later lookups.
let mut winning_root_for_shards: HashMap<u64, Result<WinningRoot, WinningRootError>> =
HashMap::new();
// for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, false, spec)?;
for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
let winning_root = winning_root(
self,
shard,
&current_epoch_attestations,
&previous_epoch_attestations,
spec,
);
if let Ok(winning_root) = &winning_root {
let total_committee_balance =
self.get_total_balance(&crosslink_committee[..], spec);
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
self.latest_crosslinks[shard as usize] = Crosslink {
epoch: current_epoch,
shard_block_root: winning_root.shard_block_root,
}
}
}
winning_root_for_shards.insert(shard, winning_root);
}
}
trace!(
"Found {} winning shard roots.",
winning_root_for_shards.len()
);
/*
* Rewards and Penalties
*/
let base_reward_quotient = previous_total_balance.integer_sqrt();
if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero);
}
/*
* Justification and finalization
*/
let epochs_since_finality = next_epoch - self.finalized_epoch;
let previous_epoch_justified_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned());
let previous_epoch_boundary_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned());
let previous_epoch_head_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned());
let previous_epoch_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_attester_indices.iter().cloned());
let active_validator_indices_hashset: HashSet<usize> =
HashSet::from_iter(active_validator_indices.iter().cloned());
debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len());
debug!("{} epochs since finality.", epochs_since_finality);
if epochs_since_finality <= 4 {
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_justified_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_boundary_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_head_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_add_assign!(
self.validator_balances[index],
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
)
}
} else {
for index in 0..self.validator_balances.len() {
let inactivity_penalty = self.inactivity_penalty(
index,
epochs_since_finality,
base_reward_quotient,
spec,
);
if active_validator_indices_hashset.contains(&index) {
if !previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if self.validator_registry[index].penalized_epoch <= current_epoch {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(
self.validator_balances[index],
2 * inactivity_penalty + base_reward
);
}
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_sub_assign!(
self.validator_balances[index],
base_reward
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
);
}
}
trace!("Processed validator justification and finalization rewards/penalities.");
/*
* Attestation inclusion
*/
for &index in &previous_epoch_attester_indices_hashset {
let inclusion_slot =
self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?;
let proposer_index = self
.get_beacon_proposer_index(inclusion_slot, spec)
.map_err(|_| Error::UnableToDetermineProducer)?;
let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[proposer_index],
base_reward / spec.includer_reward_quotient
);
}
trace!(
"Previous epoch attesters: {}.",
previous_epoch_attester_indices_hashset.len()
);
/*
* Crosslinks
*/
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, false, spec)?;
for (_crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) {
// TODO: remove the map.
let attesting_validator_indices: HashSet<usize> = HashSet::from_iter(
winning_root.attesting_validator_indices.iter().cloned(),
);
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if attesting_validator_indices.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
} else {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in &winning_root.attesting_validator_indices {
let base_reward = self.base_reward(*index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[*index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
}
}
}
}
/*
* Ejections
*/
self.process_ejections(spec);
/*
* Validator Registry
*/
self.previous_calculation_epoch = self.current_calculation_epoch;
self.previous_epoch_start_shard = self.current_epoch_start_shard;
self.previous_epoch_seed = self.current_epoch_seed;
let should_update_validator_registry = if self.finalized_epoch
> self.validator_registry_update_epoch
{
(0..self.get_current_epoch_committee_count(spec)).all(|i| {
let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count;
self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch
})
} else {
false
};
if should_update_validator_registry {
self.update_validator_registry(spec);
self.current_calculation_epoch = next_epoch;
self.current_epoch_start_shard = (self.current_epoch_start_shard
+ self.get_current_epoch_committee_count(spec) as u64)
% spec.shard_count;
self.current_epoch_seed = self
.generate_seed(self.current_calculation_epoch, spec)
.ok_or_else(|| Error::NoRandaoSeed)?;
} else {
let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
self.current_calculation_epoch = next_epoch;
self.current_epoch_seed = self
.generate_seed(self.current_calculation_epoch, spec)
.ok_or_else(|| Error::NoRandaoSeed)?;
}
}
self.process_penalties_and_exits(spec);
self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize)
% spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices(
&self.validator_registry,
next_epoch + Epoch::from(spec.entry_exit_delay),
));
self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] =
self.latest_penalized_balances
[current_epoch.as_usize() % spec.latest_penalized_exit_length];
self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self
.get_randao_mix(current_epoch, spec)
.and_then(|x| Some(*x))
.ok_or_else(|| Error::NoRandaoSeed)?;
self.latest_attestations = self
.latest_attestations
.iter()
.filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch)
.cloned()
.collect();
debug!("Epoch transition complete.");
Ok(())
}
}
fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
Hash256::from(&input.hash_tree_root()[..])
}
fn winning_root(
state: &BeaconState,
shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
spec: &ChainSpec,
) -> Result<WinningRoot, WinningRootError> {
let mut attestations = current_epoch_attestations.to_vec();
attestations.append(&mut previous_epoch_attestations.to_vec());
let mut candidates: HashMap<Hash256, WinningRoot> = HashMap::new();
let mut highest_seen_balance = 0;
for a in &attestations {
if a.data.shard != shard {
continue;
}
let shard_block_root = &a.data.shard_block_root;
if candidates.contains_key(shard_block_root) {
continue;
}
// TODO: `cargo fmt` makes this rather ugly; tidy up.
let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result<
_,
AttestationParticipantsError,
>>(vec![], |mut acc, a| {
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
acc.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
Ok(acc)
})?;
let total_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
let total_attesting_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
if total_attesting_balance > highest_seen_balance {
highest_seen_balance = total_attesting_balance;
}
let candidate_root = WinningRoot {
shard_block_root: *shard_block_root,
attesting_validator_indices,
total_attesting_balance,
total_balance,
};
candidates.insert(*shard_block_root, candidate_root);
}
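// The winning root is the candidate whose attesting balance equals the highest seen; ties are
// broken deterministically below by taking the lowest `shard_block_root` via `min_by_key`.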
Ok(candidates
.iter()
.filter_map(|(_hash, candidate)| {
if candidate.total_attesting_balance == highest_seen_balance {
Some(candidate)
} else {
None
}
})
.min_by_key(|candidate| candidate.shard_block_root)
.ok_or_else(|| WinningRootError::NoWinningRoot)?
// TODO: avoid clone.
.clone())
}
impl From<InclusionError> for Error {
fn from(e: InclusionError) -> Error {
Error::InclusionError(e)
}
}
impl From<CommitteesError> for Error {
fn from(e: CommitteesError) -> Error {
Error::CommitteesError(e)
}
}
impl From<AttestationParticipantsError> for Error {
fn from(e: AttestationParticipantsError) -> Error {
Error::AttestationParticipantsError(e)
}
}
impl From<AttestationParticipantsError> for WinningRootError {
fn from(e: AttestationParticipantsError) -> WinningRootError {
WinningRootError::AttestationParticipantsError(e)
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View File

@ -0,0 +1,10 @@
mod block_processable;
mod epoch_processable;
mod slot_processable;
pub use block_processable::{
validate_attestation, validate_attestation_without_signature, BlockProcessable,
Error as BlockProcessingError,
};
pub use epoch_processable::{EpochProcessable, Error as EpochProcessingError};
pub use slot_processable::{Error as SlotProcessingError, SlotProcessable};

View File

@ -0,0 +1,70 @@
use crate::{EpochProcessable, EpochProcessingError};
use types::{beacon_state::CommitteesError, BeaconState, ChainSpec, Hash256};
#[derive(Debug, PartialEq)]
pub enum Error {
CommitteesError(CommitteesError),
EpochProcessingError(EpochProcessingError),
}
pub trait SlotProcessable {
fn per_slot_processing(
&mut self,
previous_block_root: Hash256,
spec: &ChainSpec,
) -> Result<(), Error>;
}
impl SlotProcessable for BeaconState
where
BeaconState: EpochProcessable,
{
fn per_slot_processing(
&mut self,
previous_block_root: Hash256,
spec: &ChainSpec,
) -> Result<(), Error> {
if (self.slot + 1) % spec.epoch_length == 0 {
self.per_epoch_processing(spec)?;
}
self.slot += 1;
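// Carry the previous slot's RANDAO mix forward into the new slot's ring-buffer entry so
// lookups keep resolving until block processing overwrites it.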
self.latest_randao_mixes[self.slot.as_usize() % spec.latest_randao_mixes_length] =
self.latest_randao_mixes[(self.slot.as_usize() - 1) % spec.latest_randao_mixes_length];
// Block roots.
self.latest_block_roots[(self.slot.as_usize() - 1) % spec.latest_block_roots_length] =
previous_block_root;
if self.slot.as_usize() % spec.latest_block_roots_length == 0 {
let root = merkle_root(&self.latest_block_roots[..]);
self.batched_block_roots.push(root);
}
Ok(())
}
}
fn merkle_root(_input: &[Hash256]) -> Hash256 {
Hash256::zero()
}
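// A minimal sketch of how this stub might eventually be filled in (an illustrative assumption;
// it presumes `hashing::hash` is available, as it is elsewhere in the crate):
//
// fn merkle_root(input: &[Hash256]) -> Hash256 {
//     // Hash adjacent pairs layer by layer until a single root remains; an odd node is
//     // paired with itself.
//     let mut layer: Vec<Hash256> = input.to_vec();
//     while layer.len() > 1 {
//         layer = layer
//             .chunks(2)
//             .map(|pair| {
//                 let mut bytes = pair[0].as_ref().to_vec();
//                 bytes.extend_from_slice(pair.get(1).unwrap_or(&pair[0]).as_ref());
//                 Hash256::from(&hash(&bytes)[..])
//             })
//             .collect();
//     }
//     layer.first().cloned().unwrap_or_else(Hash256::zero)
// }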
impl From<CommitteesError> for Error {
fn from(e: CommitteesError) -> Error {
Error::CommitteesError(e)
}
}
impl From<EpochProcessingError> for Error {
fn from(e: EpochProcessingError) -> Error {
Error::EpochProcessingError(e)
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View File

@ -10,7 +10,6 @@ boolean-bitfield = { path = "../utils/boolean-bitfield" }
ethereum-types = "0.4.0" ethereum-types = "0.4.0"
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
honey-badger-split = { path = "../utils/honey-badger-split" } honey-badger-split = { path = "../utils/honey-badger-split" }
integer-sqrt = "0.1"
log = "0.4" log = "0.4"
rayon = "1.0" rayon = "1.0"
rand = "0.5.5" rand = "0.5.5"

View File

@ -1,14 +1,13 @@
use super::{AttestationData, Bitfield, Hash256}; use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, Hash256};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use bls::AggregateSignature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, Clone, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize)]
pub struct Attestation { pub struct Attestation {
pub data: AttestationData,
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData,
pub custody_bitfield: Bitfield, pub custody_bitfield: Bitfield,
pub aggregate_signature: AggregateSignature, pub aggregate_signature: AggregateSignature,
} }
@ -21,12 +20,23 @@ impl Attestation {
pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> { pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
self.data.signable_message(custody_bit) self.data.signable_message(custody_bit)
} }
pub fn verify_signature(
&self,
group_public_key: &AggregatePublicKey,
custody_bit: bool,
// TODO: use domain.
_domain: u64,
) -> bool {
self.aggregate_signature
.verify(&self.signable_message(custody_bit), group_public_key)
}
} }
impl Encodable for Attestation { impl Encodable for Attestation {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.data);
s.append(&self.aggregation_bitfield); s.append(&self.aggregation_bitfield);
s.append(&self.data);
s.append(&self.custody_bitfield); s.append(&self.custody_bitfield);
s.append(&self.aggregate_signature); s.append(&self.aggregate_signature);
} }
@ -34,14 +44,14 @@ impl Encodable for Attestation {
impl Decodable for Attestation { impl Decodable for Attestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (data, i) = AttestationData::ssz_decode(bytes, i)?;
let (aggregation_bitfield, i) = Bitfield::ssz_decode(bytes, i)?; let (aggregation_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (data, i) = AttestationData::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = Bitfield::ssz_decode(bytes, i)?; let (custody_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (aggregate_signature, i) = AggregateSignature::ssz_decode(bytes, i)?; let (aggregate_signature, i) = AggregateSignature::ssz_decode(bytes, i)?;
let attestation_record = Self { let attestation_record = Self {
data,
aggregation_bitfield, aggregation_bitfield,
data,
custody_bitfield, custody_bitfield,
aggregate_signature, aggregate_signature,
}; };
@ -49,22 +59,11 @@ impl Decodable for Attestation {
} }
} }
impl Attestation {
pub fn zero() -> Self {
Self {
data: AttestationData::zero(),
aggregation_bitfield: Bitfield::new(),
custody_bitfield: Bitfield::new(),
aggregate_signature: AggregateSignature::new(),
}
}
}
impl TreeHash for Attestation { impl TreeHash for Attestation {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.data.hash_tree_root());
result.append(&mut self.aggregation_bitfield.hash_tree_root()); result.append(&mut self.aggregation_bitfield.hash_tree_root());
result.append(&mut self.data.hash_tree_root());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.custody_bitfield.hash_tree_root());
result.append(&mut self.aggregate_signature.hash_tree_root()); result.append(&mut self.aggregate_signature.hash_tree_root());
hash(&result) hash(&result)

View File

@ -1,5 +1,5 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{AttestationDataAndCustodyBit, Hash256, Slot}; use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
@ -11,7 +11,7 @@ pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
32 + // epoch_boundary_root 32 + // epoch_boundary_root
32 + // shard_block_hash 32 + // shard_block_hash
32 + // latest_crosslink_hash 32 + // latest_crosslink_hash
8 + // justified_slot 8 + // justified_epoch
32 // justified_block_root 32 // justified_block_root
}; };
@ -22,27 +22,14 @@ pub struct AttestationData {
pub beacon_block_root: Hash256, pub beacon_block_root: Hash256,
pub epoch_boundary_root: Hash256, pub epoch_boundary_root: Hash256,
pub shard_block_root: Hash256, pub shard_block_root: Hash256,
pub latest_crosslink_root: Hash256, pub latest_crosslink: Crosslink,
pub justified_slot: Slot, pub justified_epoch: Epoch,
pub justified_block_root: Hash256, pub justified_block_root: Hash256,
} }
impl Eq for AttestationData {} impl Eq for AttestationData {}
impl AttestationData { impl AttestationData {
pub fn zero() -> Self {
Self {
slot: Slot::from(0_u64),
shard: 0,
beacon_block_root: Hash256::zero(),
epoch_boundary_root: Hash256::zero(),
shard_block_root: Hash256::zero(),
latest_crosslink_root: Hash256::zero(),
justified_slot: Slot::from(0_u64),
justified_block_root: Hash256::zero(),
}
}
pub fn canonical_root(&self) -> Hash256 { pub fn canonical_root(&self) -> Hash256 {
Hash256::from(&self.hash_tree_root()[..]) Hash256::from(&self.hash_tree_root()[..])
} }
@ -63,8 +50,8 @@ impl Encodable for AttestationData {
s.append(&self.beacon_block_root); s.append(&self.beacon_block_root);
s.append(&self.epoch_boundary_root); s.append(&self.epoch_boundary_root);
s.append(&self.shard_block_root); s.append(&self.shard_block_root);
s.append(&self.latest_crosslink_root); s.append(&self.latest_crosslink);
s.append(&self.justified_slot); s.append(&self.justified_epoch);
s.append(&self.justified_block_root); s.append(&self.justified_block_root);
} }
} }
@ -76,8 +63,8 @@ impl Decodable for AttestationData {
let (beacon_block_root, i) = <_>::ssz_decode(bytes, i)?; let (beacon_block_root, i) = <_>::ssz_decode(bytes, i)?;
let (epoch_boundary_root, i) = <_>::ssz_decode(bytes, i)?; let (epoch_boundary_root, i) = <_>::ssz_decode(bytes, i)?;
let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?; let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
let (latest_crosslink_root, i) = <_>::ssz_decode(bytes, i)?; let (latest_crosslink, i) = <_>::ssz_decode(bytes, i)?;
let (justified_slot, i) = <_>::ssz_decode(bytes, i)?; let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justified_block_root, i) = <_>::ssz_decode(bytes, i)?; let (justified_block_root, i) = <_>::ssz_decode(bytes, i)?;
let attestation_data = AttestationData { let attestation_data = AttestationData {
@ -86,8 +73,8 @@ impl Decodable for AttestationData {
beacon_block_root, beacon_block_root,
epoch_boundary_root, epoch_boundary_root,
shard_block_root, shard_block_root,
latest_crosslink_root, latest_crosslink,
justified_slot, justified_epoch,
justified_block_root, justified_block_root,
}; };
Ok((attestation_data, i)) Ok((attestation_data, i))
@ -102,8 +89,8 @@ impl TreeHash for AttestationData {
result.append(&mut self.beacon_block_root.hash_tree_root()); result.append(&mut self.beacon_block_root.hash_tree_root());
result.append(&mut self.epoch_boundary_root.hash_tree_root()); result.append(&mut self.epoch_boundary_root.hash_tree_root());
result.append(&mut self.shard_block_root.hash_tree_root()); result.append(&mut self.shard_block_root.hash_tree_root());
result.append(&mut self.latest_crosslink_root.hash_tree_root()); result.append(&mut self.latest_crosslink.hash_tree_root());
result.append(&mut self.justified_slot.hash_tree_root()); result.append(&mut self.justified_epoch.hash_tree_root());
result.append(&mut self.justified_block_root.hash_tree_root()); result.append(&mut self.justified_block_root.hash_tree_root());
hash(&result) hash(&result)
} }
@ -117,8 +104,8 @@ impl<T: RngCore> TestRandom<T> for AttestationData {
beacon_block_root: <_>::random_for_test(rng), beacon_block_root: <_>::random_for_test(rng),
epoch_boundary_root: <_>::random_for_test(rng), epoch_boundary_root: <_>::random_for_test(rng),
shard_block_root: <_>::random_for_test(rng), shard_block_root: <_>::random_for_test(rng),
latest_crosslink_root: <_>::random_for_test(rng), latest_crosslink: <_>::random_for_test(rng),
justified_slot: <_>::random_for_test(rng), justified_epoch: <_>::random_for_test(rng),
justified_block_root: <_>::random_for_test(rng), justified_block_root: <_>::random_for_test(rng),
} }
} }

View File

@ -2,7 +2,7 @@ use super::AttestationData;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, Clone, PartialEq, Default, Serialize)] #[derive(Debug, Clone, PartialEq, Default, Serialize)]
pub struct AttestationDataAndCustodyBit { pub struct AttestationDataAndCustodyBit {

View File

@ -0,0 +1,80 @@
use crate::{test_utils::TestRandom, SlashableAttestation};
use rand::RngCore;
use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct AttesterSlashing {
pub slashable_attestation_1: SlashableAttestation,
pub slashable_attestation_2: SlashableAttestation,
}
impl Encodable for AttesterSlashing {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slashable_attestation_1);
s.append(&self.slashable_attestation_2);
}
}
impl Decodable for AttesterSlashing {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slashable_attestation_1, i) = <_>::ssz_decode(bytes, i)?;
let (slashable_attestation_2, i) = <_>::ssz_decode(bytes, i)?;
Ok((
AttesterSlashing {
slashable_attestation_1,
slashable_attestation_2,
},
i,
))
}
}
impl TreeHash for AttesterSlashing {
fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
result.append(&mut self.slashable_attestation_1.hash_tree_root());
result.append(&mut self.slashable_attestation_2.hash_tree_root());
hash(&result)
}
}
impl<T: RngCore> TestRandom<T> for AttesterSlashing {
fn random_for_test(rng: &mut T) -> Self {
Self {
slashable_attestation_1: <_>::random_for_test(rng),
slashable_attestation_2: <_>::random_for_test(rng),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -17,6 +17,28 @@ pub struct BeaconBlock {
} }
impl BeaconBlock { impl BeaconBlock {
/// Produce the first block of the Beacon Chain.
pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock {
BeaconBlock {
slot: spec.genesis_slot,
parent_root: spec.zero_hash,
state_root,
randao_reveal: spec.empty_signature.clone(),
eth1_data: Eth1Data {
deposit_root: spec.zero_hash,
block_hash: spec.zero_hash,
},
signature: spec.empty_signature.clone(),
body: BeaconBlockBody {
proposer_slashings: vec![],
attester_slashings: vec![],
attestations: vec![],
deposits: vec![],
exits: vec![],
},
}
}
pub fn canonical_root(&self) -> Hash256 { pub fn canonical_root(&self) -> Hash256 {
Hash256::from(&self.hash_tree_root()[..]) Hash256::from(&self.hash_tree_root()[..])
} }
@ -33,7 +55,7 @@ impl BeaconBlock {
shard: spec.beacon_chain_shard_number, shard: spec.beacon_chain_shard_number,
block_root: block_without_signature_root, block_root: block_without_signature_root,
}; };
Hash256::from_slice(&proposal.hash_tree_root()[..]) Hash256::from(&proposal.hash_tree_root()[..])
} }
} }

View File

@ -1,23 +1,14 @@
use super::{Attestation, CasperSlashing, Deposit, Exit, ProposerSlashing}; use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
// The following types are just dummy classes as they will not be defined until
// Phase 1 (Sharding phase)
type CustodyReseed = usize;
type CustodyChallenge = usize;
type CustodyResponse = usize;
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize)]
pub struct BeaconBlockBody { pub struct BeaconBlockBody {
pub proposer_slashings: Vec<ProposerSlashing>, pub proposer_slashings: Vec<ProposerSlashing>,
pub casper_slashings: Vec<CasperSlashing>, pub attester_slashings: Vec<AttesterSlashing>,
pub attestations: Vec<Attestation>, pub attestations: Vec<Attestation>,
pub custody_reseeds: Vec<CustodyReseed>,
pub custody_challenges: Vec<CustodyChallenge>,
pub custody_responses: Vec<CustodyResponse>,
pub deposits: Vec<Deposit>, pub deposits: Vec<Deposit>,
pub exits: Vec<Exit>, pub exits: Vec<Exit>,
} }
@ -25,11 +16,8 @@ pub struct BeaconBlockBody {
impl Encodable for BeaconBlockBody { impl Encodable for BeaconBlockBody {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.proposer_slashings); s.append_vec(&self.proposer_slashings);
s.append_vec(&self.casper_slashings); s.append_vec(&self.attester_slashings);
s.append_vec(&self.attestations); s.append_vec(&self.attestations);
s.append_vec(&self.custody_reseeds);
s.append_vec(&self.custody_challenges);
s.append_vec(&self.custody_responses);
s.append_vec(&self.deposits); s.append_vec(&self.deposits);
s.append_vec(&self.exits); s.append_vec(&self.exits);
} }
@ -38,22 +26,16 @@ impl Encodable for BeaconBlockBody {
impl Decodable for BeaconBlockBody { impl Decodable for BeaconBlockBody {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (proposer_slashings, i) = <_>::ssz_decode(bytes, i)?; let (proposer_slashings, i) = <_>::ssz_decode(bytes, i)?;
let (casper_slashings, i) = <_>::ssz_decode(bytes, i)?; let (attester_slashings, i) = <_>::ssz_decode(bytes, i)?;
let (attestations, i) = <_>::ssz_decode(bytes, i)?; let (attestations, i) = <_>::ssz_decode(bytes, i)?;
let (custody_reseeds, i) = <_>::ssz_decode(bytes, i)?;
let (custody_challenges, i) = <_>::ssz_decode(bytes, i)?;
let (custody_responses, i) = <_>::ssz_decode(bytes, i)?;
let (deposits, i) = <_>::ssz_decode(bytes, i)?; let (deposits, i) = <_>::ssz_decode(bytes, i)?;
let (exits, i) = <_>::ssz_decode(bytes, i)?; let (exits, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
proposer_slashings, proposer_slashings,
casper_slashings, attester_slashings,
attestations, attestations,
custody_reseeds,
custody_challenges,
custody_responses,
deposits, deposits,
exits, exits,
}, },
@ -66,11 +48,8 @@ impl TreeHash for BeaconBlockBody {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.proposer_slashings.hash_tree_root()); result.append(&mut self.proposer_slashings.hash_tree_root());
result.append(&mut self.casper_slashings.hash_tree_root()); result.append(&mut self.attester_slashings.hash_tree_root());
result.append(&mut self.attestations.hash_tree_root()); result.append(&mut self.attestations.hash_tree_root());
result.append(&mut self.custody_reseeds.hash_tree_root());
result.append(&mut self.custody_challenges.hash_tree_root());
result.append(&mut self.custody_responses.hash_tree_root());
result.append(&mut self.deposits.hash_tree_root()); result.append(&mut self.deposits.hash_tree_root());
result.append(&mut self.exits.hash_tree_root()); result.append(&mut self.exits.hash_tree_root());
hash(&result) hash(&result)
@ -81,11 +60,8 @@ impl<T: RngCore> TestRandom<T> for BeaconBlockBody {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
proposer_slashings: <_>::random_for_test(rng), proposer_slashings: <_>::random_for_test(rng),
casper_slashings: <_>::random_for_test(rng), attester_slashings: <_>::random_for_test(rng),
attestations: <_>::random_for_test(rng), attestations: <_>::random_for_test(rng),
custody_reseeds: <_>::random_for_test(rng),
custody_challenges: <_>::random_for_test(rng),
custody_responses: <_>::random_for_test(rng),
deposits: <_>::random_for_test(rng), deposits: <_>::random_for_test(rng),
exits: <_>::random_for_test(rng), exits: <_>::random_for_test(rng),
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,12 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{Hash256, Slot}; use crate::{Epoch, Hash256};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Clone, Debug, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)]
pub struct Crosslink { pub struct Crosslink {
pub slot: Slot, pub epoch: Epoch,
pub shard_block_root: Hash256, pub shard_block_root: Hash256,
} }
@ -14,7 +14,7 @@ impl Crosslink {
/// Generates a new instance where `dynasty` and `hash` are both zero. /// Generates a new instance where `dynasty` and `hash` are both zero.
pub fn zero() -> Self { pub fn zero() -> Self {
Self { Self {
slot: Slot::from(0_u64), epoch: Epoch::new(0),
shard_block_root: Hash256::zero(), shard_block_root: Hash256::zero(),
} }
} }
@ -22,19 +22,19 @@ impl Crosslink {
impl Encodable for Crosslink { impl Encodable for Crosslink {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot); s.append(&self.epoch);
s.append(&self.shard_block_root); s.append(&self.shard_block_root);
} }
} }
impl Decodable for Crosslink { impl Decodable for Crosslink {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?; let (epoch, i) = <_>::ssz_decode(bytes, i)?;
let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?; let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
slot, epoch,
shard_block_root, shard_block_root,
}, },
i, i,
@ -45,7 +45,7 @@ impl Decodable for Crosslink {
impl TreeHash for Crosslink { impl TreeHash for Crosslink {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root());
result.append(&mut self.shard_block_root.hash_tree_root()); result.append(&mut self.shard_block_root.hash_tree_root());
hash(&result) hash(&result)
} }
@ -54,7 +54,7 @@ impl TreeHash for Crosslink {
impl<T: RngCore> TestRandom<T> for Crosslink { impl<T: RngCore> TestRandom<T> for Crosslink {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
slot: <_>::random_for_test(rng), epoch: <_>::random_for_test(rng),
shard_block_root: <_>::random_for_test(rng), shard_block_root: <_>::random_for_test(rng),
} }
} }

View File

@ -6,29 +6,29 @@ use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize)]
pub struct Deposit { pub struct Deposit {
pub merkle_branch: Vec<Hash256>, pub branch: Vec<Hash256>,
pub merkle_tree_index: u64, pub index: u64,
pub deposit_data: DepositData, pub deposit_data: DepositData,
} }
impl Encodable for Deposit { impl Encodable for Deposit {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.merkle_branch); s.append_vec(&self.branch);
s.append(&self.merkle_tree_index); s.append(&self.index);
s.append(&self.deposit_data); s.append(&self.deposit_data);
} }
} }
impl Decodable for Deposit { impl Decodable for Deposit {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (merkle_branch, i) = <_>::ssz_decode(bytes, i)?; let (branch, i) = <_>::ssz_decode(bytes, i)?;
let (merkle_tree_index, i) = <_>::ssz_decode(bytes, i)?; let (index, i) = <_>::ssz_decode(bytes, i)?;
let (deposit_data, i) = <_>::ssz_decode(bytes, i)?; let (deposit_data, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
merkle_branch, branch,
merkle_tree_index, index,
deposit_data, deposit_data,
}, },
i, i,
@ -39,8 +39,8 @@ impl Decodable for Deposit {
impl TreeHash for Deposit { impl TreeHash for Deposit {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.merkle_branch.hash_tree_root()); result.append(&mut self.branch.hash_tree_root());
result.append(&mut self.merkle_tree_index.hash_tree_root()); result.append(&mut self.index.hash_tree_root());
result.append(&mut self.deposit_data.hash_tree_root()); result.append(&mut self.deposit_data.hash_tree_root());
hash(&result) hash(&result)
} }
@ -49,8 +49,8 @@ impl TreeHash for Deposit {
impl<T: RngCore> TestRandom<T> for Deposit { impl<T: RngCore> TestRandom<T> for Deposit {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
merkle_branch: <_>::random_for_test(rng), branch: <_>::random_for_test(rng),
merkle_tree_index: <_>::random_for_test(rng), index: <_>::random_for_test(rng),
deposit_data: <_>::random_for_test(rng), deposit_data: <_>::random_for_test(rng),
} }
} }

View File

@ -1,4 +1,4 @@
use crate::{test_utils::TestRandom, Slot}; use crate::{test_utils::TestRandom, Epoch};
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
@ -6,14 +6,14 @@ use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize)]
pub struct Exit { pub struct Exit {
pub slot: Slot, pub epoch: Epoch,
pub validator_index: u32, pub validator_index: u64,
pub signature: Signature, pub signature: Signature,
} }
impl Encodable for Exit { impl Encodable for Exit {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot); s.append(&self.epoch);
s.append(&self.validator_index); s.append(&self.validator_index);
s.append(&self.signature); s.append(&self.signature);
} }
@ -21,13 +21,13 @@ impl Encodable for Exit {
impl Decodable for Exit { impl Decodable for Exit {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?; let (epoch, i) = <_>::ssz_decode(bytes, i)?;
let (validator_index, i) = <_>::ssz_decode(bytes, i)?; let (validator_index, i) = <_>::ssz_decode(bytes, i)?;
let (signature, i) = <_>::ssz_decode(bytes, i)?; let (signature, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
slot, epoch,
validator_index, validator_index,
signature, signature,
}, },
@ -39,7 +39,7 @@ impl Decodable for Exit {
impl TreeHash for Exit { impl TreeHash for Exit {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root());
result.append(&mut self.validator_index.hash_tree_root()); result.append(&mut self.validator_index.hash_tree_root());
result.append(&mut self.signature.hash_tree_root()); result.append(&mut self.signature.hash_tree_root());
hash(&result) hash(&result)
@ -49,7 +49,7 @@ impl TreeHash for Exit {
impl<T: RngCore> TestRandom<T> for Exit { impl<T: RngCore> TestRandom<T> for Exit {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
slot: <_>::random_for_test(rng), epoch: <_>::random_for_test(rng),
validator_index: <_>::random_for_test(rng), validator_index: <_>::random_for_test(rng),
signature: <_>::random_for_test(rng), signature: <_>::random_for_test(rng),
} }

View File

@ -1,34 +1,34 @@
use crate::{test_utils::TestRandom, Slot}; use crate::{test_utils::TestRandom, Epoch};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, Clone, PartialEq, Default, Serialize)] #[derive(Debug, Clone, PartialEq, Default, Serialize)]
pub struct Fork { pub struct Fork {
pub pre_fork_version: u64, pub previous_version: u64,
pub post_fork_version: u64, pub current_version: u64,
pub fork_slot: Slot, pub epoch: Epoch,
} }
impl Encodable for Fork { impl Encodable for Fork {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.pre_fork_version); s.append(&self.previous_version);
s.append(&self.post_fork_version); s.append(&self.current_version);
s.append(&self.fork_slot); s.append(&self.epoch);
} }
} }
impl Decodable for Fork { impl Decodable for Fork {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (pre_fork_version, i) = <_>::ssz_decode(bytes, i)?; let (previous_version, i) = <_>::ssz_decode(bytes, i)?;
let (post_fork_version, i) = <_>::ssz_decode(bytes, i)?; let (current_version, i) = <_>::ssz_decode(bytes, i)?;
let (fork_slot, i) = <_>::ssz_decode(bytes, i)?; let (epoch, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
pre_fork_version, previous_version,
post_fork_version, current_version,
fork_slot, epoch,
}, },
i, i,
)) ))
@ -38,9 +38,9 @@ impl Decodable for Fork {
impl TreeHash for Fork { impl TreeHash for Fork {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.pre_fork_version.hash_tree_root()); result.append(&mut self.previous_version.hash_tree_root());
result.append(&mut self.post_fork_version.hash_tree_root()); result.append(&mut self.current_version.hash_tree_root());
result.append(&mut self.fork_slot.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root());
hash(&result) hash(&result)
} }
} }
@ -48,9 +48,9 @@ impl TreeHash for Fork {
impl<T: RngCore> TestRandom<T> for Fork { impl<T: RngCore> TestRandom<T> for Fork {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
Self { Self {
pre_fork_version: <_>::random_for_test(rng), previous_version: <_>::random_for_test(rng),
post_fork_version: <_>::random_for_test(rng), current_version: <_>::random_for_test(rng),
fork_slot: <_>::random_for_test(rng), epoch: <_>::random_for_test(rng),
} }
} }
} }
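A minimal round-trip sketch for the renamed `Fork` fields (not part of this commit); it assumes `Fork` is re-exported from the `types` crate like the other containers:
use ssz::{ssz_encode, Decodable};
use types::{Epoch, Fork};
#[test]
fn fork_round_trip() {
    // Build a fork record using the v0.2.0 field names.
    let fork = Fork {
        previous_version: 0,
        current_version: 1,
        epoch: Epoch::new(2),
    };
    // Encode, then decode from offset 0, mirroring the tests elsewhere in this commit.
    let bytes = ssz_encode(&fork);
    let (decoded, _) = Fork::ssz_decode(&bytes, 0).unwrap();
    assert_eq!(fork, decoded);
}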

View File

@ -3,6 +3,7 @@ pub mod test_utils;
pub mod attestation; pub mod attestation;
pub mod attestation_data; pub mod attestation_data;
pub mod attestation_data_and_custody_bit; pub mod attestation_data_and_custody_bit;
pub mod attester_slashing;
pub mod beacon_block; pub mod beacon_block;
pub mod beacon_block_body; pub mod beacon_block_body;
pub mod beacon_state; pub mod beacon_state;
@ -20,12 +21,11 @@ pub mod pending_attestation;
pub mod proposal_signed_data; pub mod proposal_signed_data;
pub mod proposer_slashing; pub mod proposer_slashing;
pub mod readers; pub mod readers;
pub mod shard_committee;
pub mod shard_reassignment_record; pub mod shard_reassignment_record;
pub mod slashable_attestation;
pub mod slashable_vote_data; pub mod slashable_vote_data;
pub mod slot_epoch; pub mod slot_epoch;
pub mod spec; pub mod spec;
pub mod special_record;
pub mod validator; pub mod validator;
pub mod validator_registry; pub mod validator_registry;
pub mod validator_registry_delta_block; pub mod validator_registry_delta_block;
@ -36,6 +36,7 @@ use std::collections::HashMap;
pub use crate::attestation::Attestation; pub use crate::attestation::Attestation;
pub use crate::attestation_data::AttestationData; pub use crate::attestation_data::AttestationData;
pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit;
pub use crate::attester_slashing::AttesterSlashing;
pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_state::BeaconState; pub use crate::beacon_state::BeaconState;
@ -52,11 +53,10 @@ pub use crate::free_attestation::FreeAttestation;
pub use crate::pending_attestation::PendingAttestation; pub use crate::pending_attestation::PendingAttestation;
pub use crate::proposal_signed_data::ProposalSignedData; pub use crate::proposal_signed_data::ProposalSignedData;
pub use crate::proposer_slashing::ProposerSlashing; pub use crate::proposer_slashing::ProposerSlashing;
pub use crate::shard_committee::ShardCommittee; pub use crate::slashable_attestation::SlashableAttestation;
pub use crate::slashable_vote_data::SlashableVoteData; pub use crate::slashable_vote_data::SlashableVoteData;
pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_epoch::{Epoch, Slot};
pub use crate::spec::ChainSpec; pub use crate::spec::ChainSpec;
pub use crate::special_record::{SpecialRecord, SpecialRecordKind};
pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator}; pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator};
pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock; pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock;

View File

@ -6,34 +6,34 @@ use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, Clone, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize)]
pub struct PendingAttestation { pub struct PendingAttestation {
pub data: AttestationData,
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData,
pub custody_bitfield: Bitfield, pub custody_bitfield: Bitfield,
pub slot_included: Slot, pub inclusion_slot: Slot,
} }
impl Encodable for PendingAttestation { impl Encodable for PendingAttestation {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.data);
s.append(&self.aggregation_bitfield); s.append(&self.aggregation_bitfield);
s.append(&self.data);
s.append(&self.custody_bitfield); s.append(&self.custody_bitfield);
s.append(&self.slot_included); s.append(&self.inclusion_slot);
} }
} }
impl Decodable for PendingAttestation { impl Decodable for PendingAttestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (aggregation_bitfield, i) = <_>::ssz_decode(bytes, i)?; let (aggregation_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?; let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (slot_included, i) = <_>::ssz_decode(bytes, i)?; let (inclusion_slot, i) = <_>::ssz_decode(bytes, i)?;
Ok(( Ok((
Self { Self {
data, data,
aggregation_bitfield, aggregation_bitfield,
custody_bitfield, custody_bitfield,
slot_included, inclusion_slot,
}, },
i, i,
)) ))
@ -43,10 +43,10 @@ impl Decodable for PendingAttestation {
impl TreeHash for PendingAttestation { impl TreeHash for PendingAttestation {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.data.hash_tree_root());
result.append(&mut self.aggregation_bitfield.hash_tree_root()); result.append(&mut self.aggregation_bitfield.hash_tree_root());
result.append(&mut self.data.hash_tree_root());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.custody_bitfield.hash_tree_root());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.inclusion_slot.hash_tree_root());
hash(&result) hash(&result)
} }
} }
@ -57,7 +57,7 @@ impl<T: RngCore> TestRandom<T> for PendingAttestation {
data: <_>::random_for_test(rng), data: <_>::random_for_test(rng),
aggregation_bitfield: <_>::random_for_test(rng), aggregation_bitfield: <_>::random_for_test(rng),
custody_bitfield: <_>::random_for_test(rng), custody_bitfield: <_>::random_for_test(rng),
slot_included: <_>::random_for_test(rng), inclusion_slot: <_>::random_for_test(rng),
} }
} }
} }

View File

@ -7,7 +7,7 @@ use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize)]
pub struct ProposerSlashing { pub struct ProposerSlashing {
pub proposer_index: u32, pub proposer_index: u64,
pub proposal_data_1: ProposalSignedData, pub proposal_data_1: ProposalSignedData,
pub proposal_signature_1: Signature, pub proposal_signature_1: Signature,
pub proposal_data_2: ProposalSignedData, pub proposal_data_2: ProposalSignedData,

View File

@ -1,74 +0,0 @@
use crate::test_utils::TestRandom;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Clone, Debug, PartialEq, Serialize)]
pub struct ShardCommittee {
pub shard: u64,
pub committee: Vec<usize>,
}
impl Encodable for ShardCommittee {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.shard);
s.append(&self.committee);
}
}
impl Decodable for ShardCommittee {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (shard, i) = <_>::ssz_decode(bytes, i)?;
let (committee, i) = <_>::ssz_decode(bytes, i)?;
Ok((Self { shard, committee }, i))
}
}
impl TreeHash for ShardCommittee {
fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
result.append(&mut self.shard.hash_tree_root());
result.append(&mut self.committee.hash_tree_root());
hash(&result)
}
}
impl<T: RngCore> TestRandom<T> for ShardCommittee {
fn random_for_test(rng: &mut T) -> Self {
Self {
shard: <_>::random_for_test(rng),
committee: <_>::random_for_test(rng),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardCommittee::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardCommittee::random_for_test(&mut rng);
let result = original.hash_tree_root();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -0,0 +1,92 @@
use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield};
use rand::RngCore;
use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct SlashableAttestation {
pub validator_indices: Vec<u64>,
pub data: AttestationData,
pub custody_bitfield: Bitfield,
pub aggregate_signature: AggregateSignature,
}
impl Encodable for SlashableAttestation {
fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.validator_indices);
s.append(&self.data);
s.append(&self.custody_bitfield);
s.append(&self.aggregate_signature);
}
}
impl Decodable for SlashableAttestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (validator_indices, i) = <_>::ssz_decode(bytes, i)?;
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (aggregate_signature, i) = <_>::ssz_decode(bytes, i)?;
Ok((
SlashableAttestation {
validator_indices,
data,
custody_bitfield,
aggregate_signature,
},
i,
))
}
}
impl TreeHash for SlashableAttestation {
fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
result.append(&mut self.validator_indices.hash_tree_root());
result.append(&mut self.data.hash_tree_root());
result.append(&mut self.custody_bitfield.hash_tree_root());
result.append(&mut self.aggregate_signature.hash_tree_root());
hash(&result)
}
}
impl<T: RngCore> TestRandom<T> for SlashableAttestation {
fn random_for_test(rng: &mut T) -> Self {
Self {
validator_indices: <_>::random_for_test(rng),
data: <_>::random_for_test(rng),
custody_bitfield: <_>::random_for_test(rng),
aggregate_signature: <_>::random_for_test(rng),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -13,9 +13,10 @@ use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use slog; use slog;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
use std::cmp::{Ord, Ordering}; use std::cmp::{Ord, Ordering};
use std::fmt; use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::Iterator; use std::iter::Iterator;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign};
@ -163,6 +164,10 @@ macro_rules! impl_math {
*self - other.into() *self - other.into()
} }
pub fn saturating_add<T: Into<$type>>(&self, other: T) -> $type {
*self + other.into()
}
pub fn checked_div<T: Into<$type>>(&self, rhs: T) -> Option<$type> { pub fn checked_div<T: Into<$type>>(&self, rhs: T) -> Option<$type> {
let rhs: $type = rhs.into(); let rhs: $type = rhs.into();
if rhs == 0 { if rhs == 0 {
@ -239,6 +244,18 @@ macro_rules! impl_ssz {
}; };
} }
macro_rules! impl_hash {
($type: ident) => {
// Implemented to stop clippy lint:
// https://rust-lang.github.io/rust-clippy/master/index.html#derive_hash_xor_eq
impl Hash for $type {
fn hash<H: Hasher>(&self, state: &mut H) {
ssz_encode(self).hash(state)
}
}
};
}
macro_rules! impl_common { macro_rules! impl_common {
($type: ident) => { ($type: ident) => {
impl_from_into_u64!($type); impl_from_into_u64!($type);
@ -248,13 +265,14 @@ macro_rules! impl_common {
impl_math!($type); impl_math!($type);
impl_display!($type); impl_display!($type);
impl_ssz!($type); impl_ssz!($type);
impl_hash!($type);
}; };
} }
#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Hash)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize)]
pub struct Slot(u64); pub struct Slot(u64);
#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Hash)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize)]
pub struct Epoch(u64); pub struct Epoch(u64);
impl_common!(Slot); impl_common!(Slot);
@ -265,7 +283,7 @@ impl Slot {
Slot(slot) Slot(slot)
} }
pub fn epoch(&self, epoch_length: u64) -> Epoch { pub fn epoch(self, epoch_length: u64) -> Epoch {
Epoch::from(self.0 / epoch_length) Epoch::from(self.0 / epoch_length)
} }
@ -279,11 +297,15 @@ impl Epoch {
Epoch(slot) Epoch(slot)
} }
pub fn start_slot(&self, epoch_length: u64) -> Slot { pub fn max_value() -> Epoch {
Epoch(u64::max_value())
}
pub fn start_slot(self, epoch_length: u64) -> Slot {
Slot::from(self.0.saturating_mul(epoch_length)) Slot::from(self.0.saturating_mul(epoch_length))
} }
pub fn end_slot(&self, epoch_length: u64) -> Slot { pub fn end_slot(self, epoch_length: u64) -> Slot {
Slot::from( Slot::from(
self.0 self.0
.saturating_add(1) .saturating_add(1)
@ -527,6 +549,23 @@ mod tests {
assert_saturating_sub(1, 2, 0); assert_saturating_sub(1, 2, 0);
} }
#[test]
fn saturating_add() {
let assert_saturating_add = |a: u64, b: u64, result: u64| {
assert_eq!($type(a).saturating_add($type(b)), $type(result));
};
assert_saturating_add(0, 1, 1);
assert_saturating_add(1, 0, 1);
assert_saturating_add(1, 2, 3);
assert_saturating_add(2, 1, 3);
assert_saturating_add(7, 7, 14);
// Addition should be saturating.
assert_saturating_add(u64::max_value(), 1, u64::max_value());
assert_saturating_add(u64::max_value(), u64::max_value(), u64::max_value());
}
#[test] #[test]
fn checked_div() { fn checked_div() {
let assert_checked_div = |a: u64, b: u64, result: Option<u64>| { let assert_checked_div = |a: u64, b: u64, result: Option<u64>| {
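As a worked example of the `Slot`/`Epoch` helpers changed above (epoch conversion, start/end slots, saturating math, and the new SSZ-based `Hash` impl), here is a small sketch that is not part of this commit and uses only the public API shown in this file:
use std::collections::HashMap;
use types::{Epoch, Slot};
#[test]
fn slot_epoch_helpers() {
    let epoch_length = 64;
    // Slot 130 falls in epoch 2, since 130 / 64 == 2.
    let slot = Slot::new(130);
    let epoch = slot.epoch(epoch_length);
    assert_eq!(epoch, Epoch::new(2));
    // Epoch 2 spans slots 128..=191.
    assert_eq!(epoch.start_slot(epoch_length), Slot::new(128));
    assert_eq!(epoch.end_slot(epoch_length), Slot::new(191));
    // Saturating addition never wraps past the maximum epoch.
    assert_eq!(
        Epoch::max_value().saturating_add(Epoch::new(1)),
        Epoch::max_value()
    );
    // The new `impl_hash!` (hashing the SSZ encoding) lets `Slot` serve as a map key.
    let mut map: HashMap<Slot, &str> = HashMap::new();
    map.insert(slot, "head");
    assert_eq!(map.get(&Slot::new(130)), Some(&"head"));
}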

View File

@ -1,134 +1,105 @@
use super::ChainSpec; use crate::{Address, ChainSpec, Epoch, Hash256, Signature, Slot};
use bls::{Keypair, PublicKey, SecretKey, Signature};
use crate::{Address, Eth1Data, Hash256, Slot, Validator}; const GWEI: u64 = 1_000_000_000;
/// The size of a validators deposit in GWei.
pub const DEPOSIT_GWEI: u64 = 32_000_000_000;
impl ChainSpec { impl ChainSpec {
/// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation. /// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation.
/// ///
/// Of course, the actual foundation specs are unknown at this point so these are just a rough /// Of course, the actual foundation specs are unknown at this point so these are just a rough
/// estimate. /// estimate.
///
/// Spec v0.2.0
pub fn foundation() -> Self { pub fn foundation() -> Self {
let genesis_slot = Slot::new(2_u64.pow(19));
let epoch_length = 64;
let genesis_epoch = genesis_slot.epoch(epoch_length);
Self { Self {
/* /*
* Misc * Misc
*/ */
shard_count: 1_024, shard_count: 1_024,
target_committee_size: 128, target_committee_size: 128,
ejection_balance: 16 * u64::pow(10, 9),
max_balance_churn_quotient: 32, max_balance_churn_quotient: 32,
beacon_chain_shard_number: u64::max_value(), beacon_chain_shard_number: u64::max_value(),
max_casper_votes: 1_024, max_indices_per_slashable_vote: 4_096,
latest_block_roots_length: 8_192,
latest_randao_mixes_length: 8_192,
latest_penalized_exit_length: 8_192,
max_withdrawals_per_epoch: 4, max_withdrawals_per_epoch: 4,
shuffle_round_count: 90,
/* /*
* Deposit contract * Deposit contract
*/ */
deposit_contract_address: Address::from("TBD".as_bytes()), deposit_contract_address: Address::zero(),
deposit_contract_tree_depth: 32, deposit_contract_tree_depth: 32,
min_deposit: 1 * u64::pow(10, 9),
max_deposit: 32 * u64::pow(10, 9), /*
* Gwei values
*/
min_deposit_amount: u64::pow(2, 0) * GWEI,
max_deposit_amount: u64::pow(2, 5) * GWEI,
fork_choice_balance_increment: u64::pow(2, 0) * GWEI,
ejection_balance: u64::pow(2, 4) * GWEI,
/* /*
* Initial Values * Initial Values
*/ */
genesis_fork_version: 0, genesis_fork_version: 0,
genesis_slot: Slot::from(0_u64), genesis_slot: Slot::new(2_u64.pow(19)),
genesis_epoch,
genesis_start_shard: 0, genesis_start_shard: 0,
far_future_slot: Slot::from(u64::max_value()), far_future_epoch: Epoch::new(u64::max_value()),
zero_hash: Hash256::zero(), zero_hash: Hash256::zero(),
empty_signature: Signature::empty_signature(), empty_signature: Signature::empty_signature(),
bls_withdrawal_prefix_byte: 0x00, bls_withdrawal_prefix_byte: 0,
/* /*
* Time parameters * Time parameters
*/ */
slot_duration: 6, slot_duration: 6,
min_attestation_inclusion_delay: 4, min_attestation_inclusion_delay: 4,
epoch_length: 64, epoch_length,
seed_lookahead: 64, seed_lookahead: Epoch::new(1),
entry_exit_delay: 256, entry_exit_delay: 4,
eth1_data_voting_period: 1_024, eth1_data_voting_period: 16,
min_validator_withdrawal_time: u64::pow(2, 14), min_validator_withdrawal_epochs: Epoch::new(256),
/*
* State list lengths
*/
latest_block_roots_length: 8_192,
latest_randao_mixes_length: 8_192,
latest_index_roots_length: 8_192,
latest_penalized_exit_length: 8_192,
/* /*
* Reward and penalty quotients * Reward and penalty quotients
*/ */
base_reward_quotient: 32, base_reward_quotient: 32,
whistleblower_reward_quotient: 512, whistleblower_reward_quotient: 512,
includer_reward_quotient: 8, includer_reward_quotient: 8,
inactivity_penalty_quotient: u64::pow(2, 24), inactivity_penalty_quotient: 16_777_216,
/* /*
* Max operations per block * Max operations per block
*/ */
max_proposer_slashings: 16, max_proposer_slashings: 16,
max_casper_slashings: 16, max_attester_slashings: 1,
max_attestations: 128, max_attestations: 128,
max_deposits: 16, max_deposits: 16,
max_exits: 16, max_exits: 16,
/* /*
* Intialization parameters * Signature domains
*/ */
initial_validators: initial_validators_for_testing(), domain_deposit: 0,
initial_balances: initial_balances_for_testing(), domain_attestation: 1,
genesis_time: 1_544_672_897, domain_proposal: 2,
intial_eth1_data: Eth1Data { domain_exit: 3,
deposit_root: Hash256::from("deposit_root".as_bytes()), domain_randao: 4,
block_hash: Hash256::from("block_hash".as_bytes()),
},
} }
} }
} }
/// Generate a set of validator records to use with testing until the real chain starts.
fn initial_validators_for_testing() -> Vec<Validator> {
// Some dummy private keys to start with.
let key_strings = vec![
"jzjxxgjajfjrmgodszzsgqccmhnyvetcuxobhtynojtpdtbj",
"gpeehcjudxdijzhjgirfuhahmnjutlchjmoffxmimbdejakd",
"ntrrdwwebodokuwaclhoqreqyodngoyhurvesghjfxeswoaj",
"cibmzkqrzdgdlrvqaxinwpvyhcgjkeysrsjkqtkcxvznsvth",
"erqrfuahdwprsstkawggounxmihzhrvbhchcyiwtaypqcedr",
];
let mut initial_validators = Vec::with_capacity(key_strings.len());
for key_string in key_strings {
let keypair = {
let secret_key = match SecretKey::from_bytes(&key_string.as_bytes()) {
Ok(key) => key,
Err(_) => unreachable!(), // Keys are static and should not fail.
};
let public_key = PublicKey::from_secret_key(&secret_key);
Keypair {
sk: secret_key,
pk: public_key,
}
};
let validator = Validator {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(),
proposer_slots: 0,
activation_slot: Slot::max_value(),
exit_slot: Slot::max_value(),
withdrawal_slot: Slot::max_value(),
penalized_slot: Slot::max_value(),
exit_count: 0,
status_flags: None,
latest_custody_reseed_slot: Slot::from(0_u64),
penultimate_custody_reseed_slot: Slot::from(0_u64),
};
initial_validators.push(validator);
}
initial_validators
}
fn initial_balances_for_testing() -> Vec<u64> {
vec![DEPOSIT_GWEI; 4]
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
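To make the new v0.2.0 constants easier to sanity-check, here is a small test sketch that is not part of this commit; it only reads public fields set above:
use types::{ChainSpec, Epoch, Slot};
#[test]
fn foundation_spec_values() {
    let spec = ChainSpec::foundation();
    // The genesis epoch is derived from the genesis slot (2^19) and the epoch length (64).
    assert_eq!(spec.genesis_slot, Slot::new(2_u64.pow(19)));
    assert_eq!(spec.genesis_epoch, spec.genesis_slot.epoch(spec.epoch_length));
    // Gwei values: the maximum deposit is 2^5 * 10^9 Gwei (i.e. 32 ETH).
    assert_eq!(spec.max_deposit_amount, 32_000_000_000);
    assert_eq!(spec.ejection_balance, 16_000_000_000);
    // Epoch-typed parameters replace the old slot-based ones.
    assert_eq!(spec.far_future_epoch, Epoch::new(u64::max_value()));
}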

View File

@ -1,8 +1,11 @@
mod foundation; mod foundation;
use crate::{Address, Eth1Data, Hash256, Slot, Validator}; use crate::{Address, Epoch, Hash256, Slot};
use bls::Signature; use bls::Signature;
/// Holds all the "constants" for a BeaconChain.
///
/// Spec v0.2.0
#[derive(PartialEq, Debug, Clone)] #[derive(PartialEq, Debug, Clone)]
pub struct ChainSpec { pub struct ChainSpec {
/* /*
@ -10,41 +13,57 @@ pub struct ChainSpec {
*/ */
pub shard_count: u64, pub shard_count: u64,
pub target_committee_size: u64, pub target_committee_size: u64,
pub ejection_balance: u64,
pub max_balance_churn_quotient: u64, pub max_balance_churn_quotient: u64,
pub beacon_chain_shard_number: u64, pub beacon_chain_shard_number: u64,
pub max_casper_votes: u64, pub max_indices_per_slashable_vote: u64,
pub latest_block_roots_length: u64,
pub latest_randao_mixes_length: u64,
pub latest_penalized_exit_length: u64,
pub max_withdrawals_per_epoch: u64, pub max_withdrawals_per_epoch: u64,
pub shuffle_round_count: u64,
/* /*
* Deposit contract * Deposit contract
*/ */
pub deposit_contract_address: Address, pub deposit_contract_address: Address,
pub deposit_contract_tree_depth: u64, pub deposit_contract_tree_depth: u64,
pub min_deposit: u64,
pub max_deposit: u64, /*
* Gwei values
*/
pub min_deposit_amount: u64,
pub max_deposit_amount: u64,
pub fork_choice_balance_increment: u64,
pub ejection_balance: u64,
/* /*
* Initial Values * Initial Values
*/ */
pub genesis_fork_version: u64, pub genesis_fork_version: u64,
pub genesis_slot: Slot, pub genesis_slot: Slot,
pub genesis_epoch: Epoch,
pub genesis_start_shard: u64, pub genesis_start_shard: u64,
pub far_future_slot: Slot, pub far_future_epoch: Epoch,
pub zero_hash: Hash256, pub zero_hash: Hash256,
pub empty_signature: Signature, pub empty_signature: Signature,
pub bls_withdrawal_prefix_byte: u8, pub bls_withdrawal_prefix_byte: u8,
/* /*
* Time parameters * Time parameters
*/ */
pub slot_duration: u64, pub slot_duration: u64,
pub min_attestation_inclusion_delay: u64, pub min_attestation_inclusion_delay: u64,
pub epoch_length: u64, pub epoch_length: u64,
pub seed_lookahead: u64, pub seed_lookahead: Epoch,
pub entry_exit_delay: u64, pub entry_exit_delay: u64,
pub eth1_data_voting_period: u64, pub eth1_data_voting_period: u64,
pub min_validator_withdrawal_time: u64, pub min_validator_withdrawal_epochs: Epoch,
/*
* State list lengths
*/
pub latest_block_roots_length: usize,
pub latest_randao_mixes_length: usize,
pub latest_index_roots_length: usize,
pub latest_penalized_exit_length: usize,
/* /*
* Reward and penalty quotients * Reward and penalty quotients
*/ */
@ -52,19 +71,22 @@ pub struct ChainSpec {
pub whistleblower_reward_quotient: u64, pub whistleblower_reward_quotient: u64,
pub includer_reward_quotient: u64, pub includer_reward_quotient: u64,
pub inactivity_penalty_quotient: u64, pub inactivity_penalty_quotient: u64,
/* /*
* Max operations per block * Max operations per block
*/ */
pub max_proposer_slashings: u64, pub max_proposer_slashings: u64,
pub max_casper_slashings: u64, pub max_attester_slashings: u64,
pub max_attestations: u64, pub max_attestations: u64,
pub max_deposits: u64, pub max_deposits: u64,
pub max_exits: u64, pub max_exits: u64,
/* /*
* Intialization parameters * Signature domains
*/ */
pub initial_validators: Vec<Validator>, pub domain_deposit: u64,
pub initial_balances: Vec<u64>, pub domain_attestation: u64,
pub genesis_time: u64, pub domain_proposal: u64,
pub intial_eth1_data: Eth1Data, pub domain_exit: u64,
pub domain_randao: u64,
} }
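Since every `ChainSpec` field is public, callers can start from `foundation()` and override individual parameters for local testing. A hedged sketch follows; the reduced values are illustrative only, not taken from any spec:
use types::ChainSpec;
/// A hypothetical, smaller spec for fast local tests. The values below are
/// illustrative only and are not defined by the v0.2.0 specification.
fn quick_test_spec() -> ChainSpec {
    let mut spec = ChainSpec::foundation();
    spec.epoch_length = 8;
    spec.target_committee_size = 4;
    spec.min_attestation_inclusion_delay = 1;
    // Keep the derived genesis epoch consistent with the new epoch length.
    spec.genesis_epoch = spec.genesis_slot.epoch(spec.epoch_length);
    spec
}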

View File

@ -1,142 +0,0 @@
use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
/// The value of the "type" field of SpecialRecord.
///
/// Note: this value must serialize to a u8 and therefore must not be greater than 255.
#[derive(Debug, PartialEq, Clone, Copy, Serialize)]
pub enum SpecialRecordKind {
Logout = 0,
CasperSlashing = 1,
RandaoChange = 2,
}
/// The structure used in the `BeaconBlock.specials` field.
#[derive(Debug, PartialEq, Clone)]
pub struct SpecialRecord {
pub kind: u8,
pub data: Vec<u8>,
}
impl SpecialRecord {
pub fn logout(data: &[u8]) -> Self {
Self {
kind: SpecialRecordKind::Logout as u8,
data: data.to_vec(),
}
}
pub fn casper_slashing(data: &[u8]) -> Self {
Self {
kind: SpecialRecordKind::CasperSlashing as u8,
data: data.to_vec(),
}
}
pub fn randao_change(data: &[u8]) -> Self {
Self {
kind: SpecialRecordKind::RandaoChange as u8,
data: data.to_vec(),
}
}
/// Match `self.kind` to a `SpecialRecordKind`.
///
/// Returns `None` if `self.kind` is an unknown value.
pub fn resolve_kind(&self) -> Option<SpecialRecordKind> {
match self.kind {
x if x == SpecialRecordKind::Logout as u8 => Some(SpecialRecordKind::Logout),
x if x == SpecialRecordKind::CasperSlashing as u8 => {
Some(SpecialRecordKind::CasperSlashing)
}
x if x == SpecialRecordKind::RandaoChange as u8 => {
Some(SpecialRecordKind::RandaoChange)
}
_ => None,
}
}
}
impl Encodable for SpecialRecord {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.kind);
s.append_vec(&self.data);
}
}
impl Decodable for SpecialRecord {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (kind, i) = u8::ssz_decode(bytes, i)?;
let (data, i) = Decodable::ssz_decode(bytes, i)?;
Ok((SpecialRecord { kind, data }, i))
}
}
impl TreeHash for SpecialRecord {
fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
result.append(&mut self.kind.hash_tree_root());
result.append(&mut self.data.as_slice().hash_tree_root());
hash(&result)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_special_record_ssz_encode() {
let s = SpecialRecord::logout(&vec![]);
let mut ssz_stream = SszStream::new();
ssz_stream.append(&s);
let ssz = ssz_stream.drain();
assert_eq!(ssz, vec![0, 0, 0, 0, 0]);
let s = SpecialRecord::casper_slashing(&vec![]);
let mut ssz_stream = SszStream::new();
ssz_stream.append(&s);
let ssz = ssz_stream.drain();
assert_eq!(ssz, vec![1, 0, 0, 0, 0]);
let s = SpecialRecord::randao_change(&vec![]);
let mut ssz_stream = SszStream::new();
ssz_stream.append(&s);
let ssz = ssz_stream.drain();
assert_eq!(ssz, vec![2, 0, 0, 0, 0]);
let s = SpecialRecord::randao_change(&vec![42, 43, 44]);
let mut ssz_stream = SszStream::new();
ssz_stream.append(&s);
let ssz = ssz_stream.drain();
assert_eq!(ssz, vec![2, 0, 0, 0, 3, 42, 43, 44]);
}
#[test]
pub fn test_special_record_ssz_encode_decode() {
let s = SpecialRecord::randao_change(&vec![13, 16, 14]);
let mut ssz_stream = SszStream::new();
ssz_stream.append(&s);
let ssz = ssz_stream.drain();
let (s_decoded, _) = SpecialRecord::ssz_decode(&ssz, 0).unwrap();
assert_eq!(s, s_decoded);
}
#[test]
pub fn test_special_record_resolve_kind() {
let s = SpecialRecord::logout(&vec![]);
assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::Logout));
let s = SpecialRecord::casper_slashing(&vec![]);
assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::CasperSlashing));
let s = SpecialRecord::randao_change(&vec![]);
assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::RandaoChange));
let s = SpecialRecord {
kind: 88,
data: vec![],
};
assert_eq!(s.resolve_kind(), None);
}
}

View File

@ -1,4 +1,4 @@
use crate::{test_utils::TestRandom, Hash256, PublicKey, Slot}; use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
@ -46,21 +46,17 @@ fn status_flag_from_byte(flag: u8) -> Result<Option<StatusFlags>, StatusFlagsDec
pub struct Validator { pub struct Validator {
pub pubkey: PublicKey, pub pubkey: PublicKey,
pub withdrawal_credentials: Hash256, pub withdrawal_credentials: Hash256,
pub proposer_slots: u64, pub activation_epoch: Epoch,
pub activation_slot: Slot, pub exit_epoch: Epoch,
pub exit_slot: Slot, pub withdrawal_epoch: Epoch,
pub withdrawal_slot: Slot, pub penalized_epoch: Epoch,
pub penalized_slot: Slot,
pub exit_count: u64,
pub status_flags: Option<StatusFlags>, pub status_flags: Option<StatusFlags>,
pub latest_custody_reseed_slot: Slot,
pub penultimate_custody_reseed_slot: Slot,
} }
impl Validator { impl Validator {
/// This predicate indicates if the validator represented by this record is considered "active" at `slot`. /// This predicate indicates if the validator represented by this record is considered "active" at `slot`.
pub fn is_active_at(&self, slot: Slot) -> bool { pub fn is_active_at(&self, slot: Epoch) -> bool {
self.activation_slot <= slot && slot < self.exit_slot self.activation_epoch <= slot && slot < self.exit_epoch
} }
} }
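For clarity, a short sketch (not part of this commit) of the epoch-based activity window: a validator is active from its activation epoch, inclusive, up to but excluding its exit epoch.
use types::{Epoch, Validator};
#[test]
fn activity_window() {
    // Start from the default record (activation and exit at the far-future epoch)
    // and give it a concrete window.
    let mut validator = Validator::default();
    validator.activation_epoch = Epoch::new(10);
    validator.exit_epoch = Epoch::new(20);
    assert!(!validator.is_active_at(Epoch::new(9))); // not yet activated
    assert!(validator.is_active_at(Epoch::new(10))); // activation epoch is inclusive
    assert!(validator.is_active_at(Epoch::new(19)));
    assert!(!validator.is_active_at(Epoch::new(20))); // exit epoch is exclusive
}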
@ -70,15 +66,11 @@ impl Default for Validator {
Self { Self {
pubkey: PublicKey::default(), pubkey: PublicKey::default(),
withdrawal_credentials: Hash256::default(), withdrawal_credentials: Hash256::default(),
proposer_slots: 0, activation_epoch: Epoch::from(std::u64::MAX),
activation_slot: Slot::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX),
exit_slot: Slot::from(std::u64::MAX), withdrawal_epoch: Epoch::from(std::u64::MAX),
withdrawal_slot: Slot::from(std::u64::MAX), penalized_epoch: Epoch::from(std::u64::MAX),
penalized_slot: Slot::from(std::u64::MAX),
exit_count: 0,
status_flags: None, status_flags: None,
latest_custody_reseed_slot: Slot::from(0_u64), // NOTE: is `GENESIS_SLOT`
penultimate_custody_reseed_slot: Slot::from(0_u64), // NOTE: is `GENESIS_SLOT`
} }
} }
} }
@ -86,7 +78,7 @@ impl Default for Validator {
impl<T: RngCore> TestRandom<T> for StatusFlags { impl<T: RngCore> TestRandom<T> for StatusFlags {
fn random_for_test(rng: &mut T) -> Self { fn random_for_test(rng: &mut T) -> Self {
let options = vec![StatusFlags::InitiatedExit, StatusFlags::Withdrawable]; let options = vec![StatusFlags::InitiatedExit, StatusFlags::Withdrawable];
options[(rng.next_u32() as usize) % options.len()].clone() options[(rng.next_u32() as usize) % options.len()]
} }
} }
@ -94,15 +86,11 @@ impl Encodable for Validator {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.pubkey); s.append(&self.pubkey);
s.append(&self.withdrawal_credentials); s.append(&self.withdrawal_credentials);
s.append(&self.proposer_slots); s.append(&self.activation_epoch);
s.append(&self.activation_slot); s.append(&self.exit_epoch);
s.append(&self.exit_slot); s.append(&self.withdrawal_epoch);
s.append(&self.withdrawal_slot); s.append(&self.penalized_epoch);
s.append(&self.penalized_slot);
s.append(&self.exit_count);
s.append(&status_flag_to_byte(self.status_flags)); s.append(&status_flag_to_byte(self.status_flags));
s.append(&self.latest_custody_reseed_slot);
s.append(&self.penultimate_custody_reseed_slot);
} }
} }
@ -110,15 +98,11 @@ impl Decodable for Validator {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (pubkey, i) = <_>::ssz_decode(bytes, i)?; let (pubkey, i) = <_>::ssz_decode(bytes, i)?;
let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?; let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?;
let (proposer_slots, i) = <_>::ssz_decode(bytes, i)?; let (activation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (activation_slot, i) = <_>::ssz_decode(bytes, i)?; let (exit_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (exit_slot, i) = <_>::ssz_decode(bytes, i)?; let (withdrawal_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (withdrawal_slot, i) = <_>::ssz_decode(bytes, i)?; let (penalized_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (penalized_slot, i) = <_>::ssz_decode(bytes, i)?;
let (exit_count, i) = <_>::ssz_decode(bytes, i)?;
let (status_flags_byte, i): (u8, usize) = <_>::ssz_decode(bytes, i)?; let (status_flags_byte, i): (u8, usize) = <_>::ssz_decode(bytes, i)?;
let (latest_custody_reseed_slot, i) = <_>::ssz_decode(bytes, i)?;
let (penultimate_custody_reseed_slot, i) = <_>::ssz_decode(bytes, i)?;
let status_flags = status_flag_from_byte(status_flags_byte)?; let status_flags = status_flag_from_byte(status_flags_byte)?;
@ -126,15 +110,11 @@ impl Decodable for Validator {
Self { Self {
pubkey, pubkey,
withdrawal_credentials, withdrawal_credentials,
proposer_slots, activation_epoch,
activation_slot, exit_epoch,
exit_slot, withdrawal_epoch,
withdrawal_slot, penalized_epoch,
penalized_slot,
exit_count,
status_flags, status_flags,
latest_custody_reseed_slot,
penultimate_custody_reseed_slot,
}, },
i, i,
)) ))
@ -146,15 +126,11 @@ impl TreeHash for Validator {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.pubkey.hash_tree_root()); result.append(&mut self.pubkey.hash_tree_root());
result.append(&mut self.withdrawal_credentials.hash_tree_root()); result.append(&mut self.withdrawal_credentials.hash_tree_root());
result.append(&mut self.proposer_slots.hash_tree_root()); result.append(&mut self.activation_epoch.hash_tree_root());
result.append(&mut self.activation_slot.hash_tree_root()); result.append(&mut self.exit_epoch.hash_tree_root());
result.append(&mut self.exit_slot.hash_tree_root()); result.append(&mut self.withdrawal_epoch.hash_tree_root());
result.append(&mut self.withdrawal_slot.hash_tree_root()); result.append(&mut self.penalized_epoch.hash_tree_root());
result.append(&mut self.penalized_slot.hash_tree_root()); result.append(&mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root());
result.append(&mut self.exit_count.hash_tree_root());
result.append(&mut (status_flag_to_byte(self.status_flags) as u64).hash_tree_root());
result.append(&mut self.latest_custody_reseed_slot.hash_tree_root());
result.append(&mut self.penultimate_custody_reseed_slot.hash_tree_root());
hash(&result) hash(&result)
} }
} }
@ -164,15 +140,11 @@ impl<T: RngCore> TestRandom<T> for Validator {
Self { Self {
pubkey: <_>::random_for_test(rng), pubkey: <_>::random_for_test(rng),
withdrawal_credentials: <_>::random_for_test(rng), withdrawal_credentials: <_>::random_for_test(rng),
proposer_slots: <_>::random_for_test(rng), activation_epoch: <_>::random_for_test(rng),
activation_slot: <_>::random_for_test(rng), exit_epoch: <_>::random_for_test(rng),
exit_slot: <_>::random_for_test(rng), withdrawal_epoch: <_>::random_for_test(rng),
withdrawal_slot: <_>::random_for_test(rng), penalized_epoch: <_>::random_for_test(rng),
penalized_slot: <_>::random_for_test(rng),
exit_count: <_>::random_for_test(rng),
status_flags: Some(<_>::random_for_test(rng)), status_flags: Some(<_>::random_for_test(rng)),
latest_custody_reseed_slot: <_>::random_for_test(rng),
penultimate_custody_reseed_slot: <_>::random_for_test(rng),
} }
} }
} }
@ -199,17 +171,17 @@ mod tests {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let mut validator = Validator::random_for_test(&mut rng); let mut validator = Validator::random_for_test(&mut rng);
let activation_slot = u64::random_for_test(&mut rng); let activation_epoch = u64::random_for_test(&mut rng);
let exit_slot = activation_slot + 234; let exit_epoch = activation_epoch + 234;
validator.activation_slot = Slot::from(activation_slot); validator.activation_epoch = Epoch::from(activation_epoch);
validator.exit_slot = Slot::from(exit_slot); validator.exit_epoch = Epoch::from(exit_epoch);
for slot in (activation_slot - 100)..(exit_slot + 100) { for slot in (activation_epoch - 100)..(exit_epoch + 100) {
let slot = Slot::from(slot); let slot = Epoch::from(slot);
if slot < activation_slot { if slot < activation_epoch {
assert!(!validator.is_active_at(slot)); assert!(!validator.is_active_at(slot));
} else if slot >= exit_slot { } else if slot >= exit_epoch {
assert!(!validator.is_active_at(slot)); assert!(!validator.is_active_at(slot));
} else { } else {
assert!(validator.is_active_at(slot)); assert!(validator.is_active_at(slot));

View File

@ -1,15 +1,15 @@
/// Contains logic to manipulate a `&[Validator]`. /// Contains logic to manipulate a `&[Validator]`.
/// For now, we avoid defining a newtype and just have flat functions here. /// For now, we avoid defining a newtype and just have flat functions here.
use super::validator::*; use super::validator::*;
use crate::Slot; use crate::Epoch;
/// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `slot`. /// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`.
pub fn get_active_validator_indices(validators: &[Validator], slot: Slot) -> Vec<usize> { pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
validators validators
.iter() .iter()
.enumerate() .enumerate()
.filter_map(|(index, validator)| { .filter_map(|(index, validator)| {
if validator.is_active_at(slot) { if validator.is_active_at(epoch) {
Some(index) Some(index)
} else { } else {
None None
@ -28,8 +28,8 @@ mod tests {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let validators = vec![]; let validators = vec![];
let some_slot = Slot::random_for_test(&mut rng); let some_epoch = Epoch::random_for_test(&mut rng);
let indices = get_active_validator_indices(&validators, some_slot); let indices = get_active_validator_indices(&validators, some_epoch);
assert_eq!(indices, vec![]); assert_eq!(indices, vec![]);
} }
@ -42,8 +42,8 @@ mod tests {
validators.push(Validator::default()) validators.push(Validator::default())
} }
let some_slot = Slot::random_for_test(&mut rng); let some_epoch = Epoch::random_for_test(&mut rng);
let indices = get_active_validator_indices(&validators, some_slot); let indices = get_active_validator_indices(&validators, some_epoch);
assert_eq!(indices, vec![]); assert_eq!(indices, vec![]);
} }
@ -51,7 +51,7 @@ mod tests {
fn can_get_all_active_validator_indices() { fn can_get_all_active_validator_indices() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let count_validators = 10; let count_validators = 10;
let some_slot = Slot::random_for_test(&mut rng); let some_epoch = Epoch::random_for_test(&mut rng);
let mut validators = (0..count_validators) let mut validators = (0..count_validators)
.into_iter() .into_iter()
@ -61,8 +61,8 @@ mod tests {
let activation_offset = u64::random_for_test(&mut rng); let activation_offset = u64::random_for_test(&mut rng);
let exit_offset = u64::random_for_test(&mut rng); let exit_offset = u64::random_for_test(&mut rng);
validator.activation_slot = some_slot - activation_offset; validator.activation_epoch = some_epoch - activation_offset;
validator.exit_slot = some_slot + exit_offset; validator.exit_epoch = some_epoch + exit_offset;
validator validator
}) })
@ -70,10 +70,10 @@ mod tests {
// test boundary condition by ensuring that at least one validator in the list just activated // test boundary condition by ensuring that at least one validator in the list just activated
if let Some(validator) = validators.get_mut(0) { if let Some(validator) = validators.get_mut(0) {
validator.activation_slot = some_slot; validator.activation_epoch = some_epoch;
} }
let indices = get_active_validator_indices(&validators, some_slot); let indices = get_active_validator_indices(&validators, some_epoch);
assert_eq!( assert_eq!(
indices, indices,
(0..count_validators).into_iter().collect::<Vec<_>>() (0..count_validators).into_iter().collect::<Vec<_>>()
@ -82,31 +82,35 @@ mod tests {
fn set_validators_to_default_entry_exit(validators: &mut [Validator]) { fn set_validators_to_default_entry_exit(validators: &mut [Validator]) {
for validator in validators.iter_mut() { for validator in validators.iter_mut() {
validator.activation_slot = Slot::max_value(); validator.activation_epoch = Epoch::max_value();
validator.exit_slot = Slot::max_value(); validator.exit_epoch = Epoch::max_value();
} }
} }
// sets all `validators` to be active as of some slot prior to `slot`. returns the activation slot. // sets all `validators` to be active as of some epoch prior to `epoch`. returns the activation epoch.
fn set_validators_to_activated(validators: &mut [Validator], slot: Slot) -> Slot { fn set_validators_to_activated(validators: &mut [Validator], epoch: Epoch) -> Epoch {
let activation_slot = slot - 10; let activation_epoch = epoch - 10;
for validator in validators.iter_mut() { for validator in validators.iter_mut() {
validator.activation_slot = activation_slot; validator.activation_epoch = activation_epoch;
} }
activation_slot activation_epoch
} }
// sets all `validators` to be exited as of some slot before `slot`. // sets all `validators` to be exited as of some epoch before `epoch`.
fn set_validators_to_exited(validators: &mut [Validator], slot: Slot, activation_slot: Slot) { fn set_validators_to_exited(
assert!(activation_slot < slot); validators: &mut [Validator],
let mut exit_slot = activation_slot + 10; epoch: Epoch,
while exit_slot >= slot { activation_epoch: Epoch,
exit_slot -= 1; ) {
assert!(activation_epoch < epoch);
let mut exit_epoch = activation_epoch + 10;
while exit_epoch >= epoch {
exit_epoch -= 1;
} }
assert!(activation_slot < exit_slot && exit_slot < slot); assert!(activation_epoch < exit_epoch && exit_epoch < epoch);
for validator in validators.iter_mut() { for validator in validators.iter_mut() {
validator.exit_slot = exit_slot; validator.exit_epoch = exit_epoch;
} }
} }
@ -115,18 +119,18 @@ mod tests {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
const COUNT_PARTITIONS: usize = 3; const COUNT_PARTITIONS: usize = 3;
const COUNT_VALIDATORS: usize = 3 * COUNT_PARTITIONS; const COUNT_VALIDATORS: usize = 3 * COUNT_PARTITIONS;
let some_slot: Slot = Slot::random_for_test(&mut rng); let some_epoch: Epoch = Epoch::random_for_test(&mut rng);
let mut validators = (0..COUNT_VALIDATORS) let mut validators = (0..COUNT_VALIDATORS)
.into_iter() .into_iter()
.map(|_| { .map(|_| {
let mut validator = Validator::default(); let mut validator = Validator::default();
let activation_offset = Slot::random_for_test(&mut rng); let activation_offset = Epoch::random_for_test(&mut rng);
let exit_offset = Slot::random_for_test(&mut rng); let exit_offset = Epoch::random_for_test(&mut rng);
validator.activation_slot = some_slot - activation_offset; validator.activation_epoch = some_epoch - activation_offset;
validator.exit_slot = some_slot + exit_offset; validator.exit_epoch = some_epoch + exit_offset;
validator validator
}) })
@ -141,19 +145,19 @@ mod tests {
} }
1 => { 1 => {
// 2. activated, but not exited // 2. activated, but not exited
set_validators_to_activated(chunk, some_slot); set_validators_to_activated(chunk, some_epoch);
// test boundary condition by ensuring that at least one validator in the list just activated // test boundary condition by ensuring that at least one validator in the list just activated
if let Some(validator) = chunk.get_mut(0) { if let Some(validator) = chunk.get_mut(0) {
validator.activation_slot = some_slot; validator.activation_epoch = some_epoch;
} }
} }
2 => { 2 => {
// 3. exited // 3. exited
let activation_slot = set_validators_to_activated(chunk, some_slot); let activation_epoch = set_validators_to_activated(chunk, some_epoch);
set_validators_to_exited(chunk, some_slot, activation_slot); set_validators_to_exited(chunk, some_epoch, activation_epoch);
// test boundary condition by ensuring that at least one validator in the list just exited // test boundary condition by ensuring that at least one validator in the list just exited
if let Some(validator) = chunk.get_mut(0) { if let Some(validator) = chunk.get_mut(0) {
validator.exit_slot = some_slot; validator.exit_epoch = some_epoch;
} }
} }
_ => unreachable!( _ => unreachable!(
@ -162,7 +166,7 @@ mod tests {
} }
} }
let indices = get_active_validator_indices(&validators, some_slot); let indices = get_active_validator_indices(&validators, some_epoch);
assert_eq!(indices, vec![3, 4, 5]); assert_eq!(indices, vec![3, 4, 5]);
} }
} }
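A minimal usage sketch for the epoch-based helper above (not part of this commit), assuming the function is reached through the public `validator_registry` module of the `types` crate:
use types::validator_registry::get_active_validator_indices;
use types::{Epoch, Validator};
#[test]
fn active_indices_example() {
    let epoch = Epoch::new(100);
    // Index 0: active at `epoch`.
    let mut active = Validator::default();
    active.activation_epoch = Epoch::new(90);
    active.exit_epoch = Epoch::new(110);
    // Index 1: never activated (defaults keep the far-future epoch).
    let pending = Validator::default();
    // Index 2: already exited.
    let mut exited = Validator::default();
    exited.activation_epoch = Epoch::new(50);
    exited.exit_epoch = Epoch::new(60);
    let validators = vec![active, pending, exited];
    assert_eq!(get_active_validator_indices(&validators, epoch), vec![0]);
}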

View File

@ -81,6 +81,11 @@ impl BooleanBitfield {
self.0.len() self.0.len()
} }
/// Returns true if `self.len() == 0`
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of bytes required to represent this bitfield. /// Returns the number of bytes required to represent this bitfield.
pub fn num_bytes(&self) -> usize { pub fn num_bytes(&self) -> usize {
self.to_bytes().len() self.to_bytes().len()

View File

@ -1,180 +0,0 @@
use bls::verify_proof_of_possession;
use types::{BeaconState, ChainSpec, Deposit, Slot, Validator};
#[derive(Debug, PartialEq, Clone)]
pub enum ValidatorInductionError {
InvalidShard,
InvaidProofOfPossession,
InvalidWithdrawalCredentials,
}
pub fn process_deposit(
state: &mut BeaconState,
deposit: &Deposit,
spec: &ChainSpec,
) -> Result<(), ValidatorInductionError> {
let deposit_input = &deposit.deposit_data.deposit_input;
let deposit_data = &deposit.deposit_data;
// TODO: Update the signature validation as defined in the spec once issues #91 and #70 are completed
if !verify_proof_of_possession(&deposit_input.proof_of_possession, &deposit_input.pubkey) {
return Err(ValidatorInductionError::InvaidProofOfPossession);
}
let validator_index = state
.validator_registry
.iter()
.position(|validator| validator.pubkey == deposit_input.pubkey);
match validator_index {
Some(i) => {
if state.validator_registry[i].withdrawal_credentials
== deposit_input.withdrawal_credentials
{
state.validator_balances[i] += deposit_data.amount;
return Ok(());
}
Err(ValidatorInductionError::InvalidWithdrawalCredentials)
}
None => {
let validator = Validator {
pubkey: deposit_input.pubkey.clone(),
withdrawal_credentials: deposit_input.withdrawal_credentials,
proposer_slots: 0,
activation_slot: spec.far_future_slot,
exit_slot: spec.far_future_slot,
withdrawal_slot: spec.far_future_slot,
penalized_slot: spec.far_future_slot,
exit_count: 0,
status_flags: None,
latest_custody_reseed_slot: Slot::new(0),
penultimate_custody_reseed_slot: Slot::new(0),
};
let _index = state.validator_registry.len();
state.validator_registry.push(validator);
state.validator_balances.push(deposit_data.amount);
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use bls::{create_proof_of_possession, Keypair};
/// The size of a validators deposit in GWei.
pub const DEPOSIT_GWEI: u64 = 32_000_000_000;
fn get_deposit() -> Deposit {
let mut rng = XorShiftRng::from_seed([42; 16]);
let mut deposit = Deposit::random_for_test(&mut rng);
let kp = Keypair::random();
deposit.deposit_data.deposit_input.pubkey = kp.pk.clone();
deposit.deposit_data.deposit_input.proof_of_possession = create_proof_of_possession(&kp);
deposit
}
fn get_validator() -> Validator {
let mut rng = XorShiftRng::from_seed([42; 16]);
Validator::random_for_test(&mut rng)
}
fn deposit_equals_record(dep: &Deposit, val: &Validator) -> bool {
(dep.deposit_data.deposit_input.pubkey == val.pubkey)
& (dep.deposit_data.deposit_input.withdrawal_credentials == val.withdrawal_credentials)
& (verify_proof_of_possession(
&dep.deposit_data.deposit_input.proof_of_possession,
&val.pubkey,
))
}
#[test]
fn test_process_deposit_valid_empty_validators() {
let mut state = BeaconState::default();
let mut deposit = get_deposit();
let spec = ChainSpec::foundation();
deposit.deposit_data.amount = DEPOSIT_GWEI;
let result = process_deposit(&mut state, &deposit, &spec);
assert_eq!(result.unwrap(), ());
assert!(deposit_equals_record(
&deposit,
&state.validator_registry[0]
));
assert_eq!(state.validator_registry.len(), 1);
assert_eq!(state.validator_balances.len(), 1);
}
#[test]
fn test_process_deposits_empty_validators() {
let mut state = BeaconState::default();
let spec = ChainSpec::foundation();
for i in 0..5 {
let mut deposit = get_deposit();
let result = process_deposit(&mut state, &deposit, &spec);
deposit.deposit_data.amount = DEPOSIT_GWEI;
assert_eq!(result.unwrap(), ());
assert!(deposit_equals_record(
&deposit,
&state.validator_registry[i]
));
assert_eq!(state.validator_registry.len(), i + 1);
assert_eq!(state.validator_balances.len(), i + 1);
}
}
#[test]
fn test_process_deposit_top_out() {
let mut state = BeaconState::default();
let spec = ChainSpec::foundation();
let mut deposit = get_deposit();
let mut validator = get_validator();
deposit.deposit_data.amount = DEPOSIT_GWEI;
validator.pubkey = deposit.deposit_data.deposit_input.pubkey.clone();
validator.withdrawal_credentials =
deposit.deposit_data.deposit_input.withdrawal_credentials;
state.validator_registry.push(validator);
state.validator_balances.push(DEPOSIT_GWEI);
let result = process_deposit(&mut state, &deposit, &spec);
assert_eq!(result.unwrap(), ());
assert!(deposit_equals_record(
&deposit,
&state.validator_registry[0]
));
assert_eq!(state.validator_balances[0], DEPOSIT_GWEI * 2);
assert_eq!(state.validator_registry.len(), 1);
assert_eq!(state.validator_balances.len(), 1);
}
#[test]
fn test_process_deposit_invalid_proof_of_possession() {
let mut state = BeaconState::default();
let mut deposit = get_deposit();
let spec = ChainSpec::foundation();
deposit.deposit_data.amount = DEPOSIT_GWEI;
deposit.deposit_data.deposit_input.proof_of_possession =
create_proof_of_possession(&Keypair::random());
let result = process_deposit(&mut state, &deposit, &spec);
assert_eq!(
result,
Err(ValidatorInductionError::InvaidProofOfPossession)
);
assert_eq!(state.validator_registry.len(), 0);
assert_eq!(state.validator_balances.len(), 0);
}
}

View File

@ -1,3 +0,0 @@
mod inductor;
pub use crate::inductor::{process_deposit, ValidatorInductionError};

View File

@ -1,2 +1,5 @@
// The protobuf code-generator is not up-to-date with clippy, so we silence some warnings.
#[allow(renamed_and_removed_lints)]
pub mod services; pub mod services;
#[allow(renamed_and_removed_lints)]
pub mod services_grpc; pub mod services_grpc;

View File

@ -5,7 +5,7 @@ use protos::services::{
use protos::services_grpc::BeaconBlockServiceClient; use protos::services_grpc::BeaconBlockServiceClient;
use ssz::{ssz_encode, Decodable}; use ssz::{ssz_encode, Decodable};
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, PublicKey, Signature, Slot}; use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, Signature, Slot};
/// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be /// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be
/// implemented upon it. /// implemented upon it.
@ -20,12 +20,6 @@ impl BeaconBlockGrpcClient {
} }
impl BeaconNode for BeaconBlockGrpcClient { impl BeaconNode for BeaconBlockGrpcClient {
fn proposer_nonce(&self, pubkey: &PublicKey) -> Result<u64, BeaconNodeError> {
// TODO: this might not be required.
//
// See: https://github.com/ethereum/eth2.0-specs/pull/496
panic!("Not implemented.")
}
/// Request a Beacon Node (BN) to produce a new block at the supplied slot. /// Request a Beacon Node (BN) to produce a new block at the supplied slot.
/// ///
/// Returns `None` if it is not possible to produce at the supplied slot. For example, if the /// Returns `None` if it is not possible to produce at the supplied slot. For example, if the
@ -33,7 +27,8 @@ impl BeaconNode for BeaconBlockGrpcClient {
fn produce_beacon_block( fn produce_beacon_block(
&self, &self,
slot: Slot, slot: Slot,
randao_reveal: &Signature, // TODO: use randao_reveal when the proto APIs have been updated.
_randao_reveal: &Signature,
) -> Result<Option<BeaconBlock>, BeaconNodeError> { ) -> Result<Option<BeaconBlock>, BeaconNodeError> {
let mut req = ProduceBeaconBlockRequest::new(); let mut req = ProduceBeaconBlockRequest::new();
req.set_slot(slot.as_u64()); req.set_slot(slot.as_u64());
@ -65,11 +60,8 @@ impl BeaconNode for BeaconBlockGrpcClient {
signature, signature,
body: BeaconBlockBody { body: BeaconBlockBody {
proposer_slashings: vec![], proposer_slashings: vec![],
casper_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
custody_reseeds: vec![],
custody_challenges: vec![],
custody_responses: vec![],
deposits: vec![], deposits: vec![],
exits: vec![], exits: vec![],
}, },
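For reference, the body constructed above now carries the v0.2.0 operation lists. A standalone sketch (not part of this commit), assuming `BeaconBlockBody` is re-exported from the `types` crate as in the import above:
use types::BeaconBlockBody;
/// An empty v0.2.0 block body: the old `casper_slashings` and custody lists are
/// gone, replaced by a single `attester_slashings` list.
fn empty_body() -> BeaconBlockBody {
    BeaconBlockBody {
        proposer_slashings: vec![],
        attester_slashings: vec![],
        attestations: vec![],
        deposits: vec![],
        exits: vec![],
    }
}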

View File

@ -1,53 +0,0 @@
use block_producer::{
BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer,
};
use slog::{error, info, warn, Logger};
use slot_clock::SlotClock;
use std::time::Duration;
pub struct BlockProducerService<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub block_producer: BlockProducer<T, U, V, W>,
pub poll_interval_millis: u64,
pub log: Logger,
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducerService<T, U, V, W> {
/// Run a loop which polls the block producer each `poll_interval_millis` millseconds.
///
/// Logs the results of the polls.
pub fn run(&mut self) {
loop {
match self.block_producer.poll() {
Err(error) => {
error!(self.log, "Block producer poll error"; "error" => format!("{:?}", error))
}
Ok(BlockProducerPollOutcome::BlockProduced(slot)) => {
info!(self.log, "Produced block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlashableBlockNotProduced(slot)) => {
warn!(self.log, "Slashable block was not signed"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BlockProductionNotRequired(slot)) => {
info!(self.log, "Block production not required"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ProducerDutiesUnknown(slot)) => {
error!(self.log, "Block production duties unknown"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlotAlreadyProcessed(slot)) => {
warn!(self.log, "Attempted to re-process slot"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BeaconNodeUnableToProduceBlock(slot)) => {
error!(self.log, "Beacon node unable to produce block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SignerRejection(slot)) => {
error!(self.log, "The cryptographic signer refused to sign the block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => {
error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot)
}
};
std::thread::sleep(Duration::from_millis(self.poll_interval_millis));
}
}
}

View File

@ -1,5 +1,58 @@
mod beacon_block_grpc_client; mod beacon_block_grpc_client;
mod block_producer_service; // mod block_producer_service;
use block_producer::{
BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer,
};
use slog::{error, info, warn, Logger};
use slot_clock::SlotClock;
use std::time::Duration;
pub use self::beacon_block_grpc_client::BeaconBlockGrpcClient; pub use self::beacon_block_grpc_client::BeaconBlockGrpcClient;
pub use self::block_producer_service::BlockProducerService;
pub struct BlockProducerService<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub block_producer: BlockProducer<T, U, V, W>,
pub poll_interval_millis: u64,
pub log: Logger,
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducerService<T, U, V, W> {
/// Run a loop which polls the block producer every `poll_interval_millis` milliseconds.
///
/// Logs the results of the polls.
pub fn run(&mut self) {
loop {
match self.block_producer.poll() {
Err(error) => {
error!(self.log, "Block producer poll error"; "error" => format!("{:?}", error))
}
Ok(BlockProducerPollOutcome::BlockProduced(slot)) => {
info!(self.log, "Produced block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlashableBlockNotProduced(slot)) => {
warn!(self.log, "Slashable block was not signed"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BlockProductionNotRequired(slot)) => {
info!(self.log, "Block production not required"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ProducerDutiesUnknown(slot)) => {
error!(self.log, "Block production duties unknown"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SlotAlreadyProcessed(slot)) => {
warn!(self.log, "Attempted to re-process slot"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::BeaconNodeUnableToProduceBlock(slot)) => {
error!(self.log, "Beacon node unable to produce block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::SignerRejection(slot)) => {
error!(self.log, "The cryptographic signer refused to sign the block"; "slot" => slot)
}
Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => {
error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot)
}
};
std::thread::sleep(Duration::from_millis(self.poll_interval_millis));
}
}
}
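
For context, BlockProducerService::run above is a plain poll-and-sleep loop: each iteration polls the block producer, matches on the outcome, logs it, and sleeps for `poll_interval_millis`. The standalone sketch below illustrates that pattern only; `MockProducer`, its two-variant `PollOutcome`, and the `println!` logging are hypothetical stand-ins, not the crate's actual API.

use std::thread;
use std::time::Duration;

// Hypothetical, simplified stand-ins for the real block_producer types.
#[derive(Debug)]
enum PollOutcome {
    BlockProduced(u64),
    BlockProductionNotRequired(u64),
}

struct MockProducer {
    slot: u64,
}

impl MockProducer {
    // Pretend every other slot requires block production.
    fn poll(&mut self) -> Result<PollOutcome, String> {
        self.slot += 1;
        if self.slot % 2 == 0 {
            Ok(PollOutcome::BlockProduced(self.slot))
        } else {
            Ok(PollOutcome::BlockProductionNotRequired(self.slot))
        }
    }
}

fn main() {
    let mut producer = MockProducer { slot: 0 };
    let poll_interval_millis = 100;

    // Same shape as BlockProducerService::run: poll, match on the outcome,
    // log it, then sleep. Bounded here so the example terminates.
    for _ in 0..5 {
        match producer.poll() {
            Err(error) => eprintln!("poll error: {:?}", error),
            Ok(PollOutcome::BlockProduced(slot)) => println!("produced block at slot {}", slot),
            Ok(PollOutcome::BlockProductionNotRequired(slot)) => {
                println!("no block required at slot {}", slot)
            }
        }
        thread::sleep(Duration::from_millis(poll_interval_millis));
    }
}

In the real service the loop runs indefinitely and logging goes through slog rather than standard output.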

View File

@ -47,7 +47,7 @@ impl EpochDutiesMap {
pub fn get(&self, epoch: Epoch) -> Result<Option<EpochDuties>, EpochDutiesMapError> {
    let map = self.map.read().map_err(|_| EpochDutiesMapError::Poisoned)?;
    match map.get(&epoch) {
        Some(duties) => Ok(Some(duties.clone())),
        Some(duties) => Ok(Some(*duties)),
        None => Ok(None),
    }
}
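
The switch from `duties.clone()` to `*duties` suggests `EpochDuties` now implements `Copy`, so dereferencing the map's `&EpochDuties` yields a cheap bitwise copy. A minimal standalone illustration of that pattern, using a hypothetical `Duties` stand-in:

use std::collections::HashMap;

// Hypothetical stand-in for EpochDuties: small plain data, so `Copy` is cheap.
#[derive(Debug, Clone, Copy)]
struct Duties {
    block_production_slot: Option<u64>,
}

fn get(map: &HashMap<u64, Duties>, epoch: u64) -> Option<Duties> {
    // `*duties` copies the value out of the map; no `.clone()` call is needed.
    map.get(&epoch).map(|duties| *duties)
}

fn main() {
    let mut map = HashMap::new();
    map.insert(3, Duties { block_production_slot: Some(25) });

    println!("{:?}", get(&map, 3));
    println!("{:?}", get(&map, 4));
}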

View File

@ -12,7 +12,7 @@ use self::traits::{BeaconNode, BeaconNodeError};
use bls::PublicKey;
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{ChainSpec, Epoch, Slot};
use types::{ChainSpec, Epoch};
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PollOutcome {
@ -33,7 +33,6 @@ pub enum Error {
    SlotClockError,
    SlotUnknowable,
    EpochMapPoisoned,
    EpochLengthIsZero,
    BeaconNodeError(BeaconNodeError),
}
@ -103,6 +102,7 @@ mod tests {
use super::*;
use bls::Keypair;
use slot_clock::TestingSlotClock;
use types::Slot;
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160

View File

@ -88,9 +88,12 @@ fn main() {
let spec = Arc::new(ChainSpec::foundation());
// Clock for determining the present slot.
// TODO: this shouldn't be a static time, instead it should be pulled from the beacon node.
// https://github.com/sigp/lighthouse/issues/160
let genesis_time = 1_549_935_547;
let slot_clock = {
    info!(log, "Genesis time"; "unix_epoch_seconds" => spec.genesis_time);
    info!(log, "Genesis time"; "unix_epoch_seconds" => genesis_time);
    let clock = SystemTimeSlotClock::new(spec.genesis_time, spec.slot_duration)
    let clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
        .expect("Unable to instantiate SystemTimeSlotClock.");
    Arc::new(clock)
};
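
The hard-coded `genesis_time` above feeds a wall-clock based slot clock, which conceptually derives the present slot as (now - genesis_time) / slot_duration. The sketch below shows that arithmetic directly with std::time; the free function `present_slot` and the 6-second slot duration are illustrative assumptions, not the slot_clock crate's API or the spec constant.

use std::time::{SystemTime, UNIX_EPOCH};

/// Illustrative only: the slot implied by the wall clock, or `None` if the
/// clock reads earlier than genesis or cannot be read at all.
fn present_slot(genesis_time: u64, slot_duration_seconds: u64) -> Option<u64> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
    now.checked_sub(genesis_time)
        .map(|elapsed| elapsed / slot_duration_seconds)
}

fn main() {
    // Genesis time taken from the diff above; the 6-second slot duration is
    // an assumed example value, not necessarily the spec constant.
    let genesis_time = 1_549_935_547;
    let slot_duration_seconds = 6;

    match present_slot(genesis_time, slot_duration_seconds) {
        Some(slot) => println!("present slot: {}", slot),
        None => println!("genesis has not yet occurred"),
    }
}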
@ -139,7 +142,6 @@ fn main() {
// Spawn a new thread to perform block production for the validator.
let producer_thread = {
    let spec = spec.clone();
    let pubkey = keypair.pk.clone();
    let signer = Arc::new(LocalSigner::new(keypair.clone()));
    let duties_map = duties_map.clone();
    let slot_clock = slot_clock.clone();
@ -147,7 +149,7 @@ fn main() {
    let client = Arc::new(BeaconBlockGrpcClient::new(beacon_block_grpc_client.clone()));
    thread::spawn(move || {
        let block_producer =
            BlockProducer::new(spec, pubkey, duties_map, slot_clock, client, signer);
        let block_producer =
            BlockProducer::new(spec, duties_map, slot_clock, client, signer);
        let mut block_producer_service = BlockProducerService {
            block_producer,
            poll_interval_millis,