Merge pull request #1 from sigp/master

Merged from the upstream master project.
Authored by Langers on 2019-02-23 13:52:32 +10:00, committed by GitHub
commit 7842b0bcf7
79 changed files with 1836 additions and 1416 deletions

View File

@ -1,7 +1,7 @@
[workspace] [workspace]
members = [ members = [
"eth2/attester", "eth2/attester",
"eth2/block_producer", "eth2/block_proposer",
"eth2/fork_choice", "eth2/fork_choice",
"eth2/state_processing", "eth2/state_processing",
"eth2/types", "eth2/types",
@ -12,6 +12,7 @@ members = [
"eth2/utils/int_to_bytes", "eth2/utils/int_to_bytes",
"eth2/utils/slot_clock", "eth2/utils/slot_clock",
"eth2/utils/ssz", "eth2/utils/ssz",
"eth2/utils/ssz_derive",
"eth2/utils/swap_or_not_shuffle", "eth2/utils/swap_or_not_shuffle",
"eth2/utils/fisher_yates_shuffle", "eth2/utils/fisher_yates_shuffle",
"beacon_node", "beacon_node",

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
block_producer = { path = "../../eth2/block_producer" } block_proposer = { path = "../../eth2/block_proposer" }
bls = { path = "../../eth2/utils/bls" } bls = { path = "../../eth2/utils/bls" }
boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" } boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
db = { path = "../db" } db = { path = "../db" }

View File

@ -1,3 +1,4 @@
use crate::cached_beacon_state::CachedBeaconState;
use state_processing::validate_attestation_without_signature; use state_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use types::{ use types::{
@ -76,12 +77,12 @@ impl AttestationAggregator {
/// - The signature is verified against that of the validator at `validator_index`. /// - The signature is verified against that of the validator at `validator_index`.
pub fn process_free_attestation( pub fn process_free_attestation(
&mut self, &mut self,
state: &BeaconState, cached_state: &CachedBeaconState,
free_attestation: &FreeAttestation, free_attestation: &FreeAttestation,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Outcome, BeaconStateError> { ) -> Result<Outcome, BeaconStateError> {
let (slot, shard, committee_index) = some_or_invalid!( let (slot, shard, committee_index) = some_or_invalid!(
state.attestation_slot_and_shard_for_validator( cached_state.attestation_slot_and_shard_for_validator(
free_attestation.validator_index as usize, free_attestation.validator_index as usize,
spec, spec,
)?, )?,
@ -104,7 +105,8 @@ impl AttestationAggregator {
let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT); let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);
let validator_record = some_or_invalid!( let validator_record = some_or_invalid!(
state cached_state
.state
.validator_registry .validator_registry
.get(free_attestation.validator_index as usize), .get(free_attestation.validator_index as usize),
Message::BadValidatorIndex Message::BadValidatorIndex

View File

@ -1,4 +1,5 @@
use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome}; use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
use crate::cached_beacon_state::CachedBeaconState;
use crate::checkpoint::CheckPoint; use crate::checkpoint::CheckPoint;
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
@ -69,6 +70,7 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
canonical_head: RwLock<CheckPoint>, canonical_head: RwLock<CheckPoint>,
finalized_head: RwLock<CheckPoint>, finalized_head: RwLock<CheckPoint>,
pub state: RwLock<BeaconState>, pub state: RwLock<BeaconState>,
pub cached_state: RwLock<CachedBeaconState>,
pub spec: ChainSpec, pub spec: ChainSpec,
pub fork_choice: RwLock<F>, pub fork_choice: RwLock<F>,
} }
@ -107,6 +109,11 @@ where
let block_root = genesis_block.canonical_root(); let block_root = genesis_block.canonical_root();
block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;
let cached_state = RwLock::new(CachedBeaconState::from_beacon_state(
genesis_state.clone(),
spec.clone(),
)?);
let finalized_head = RwLock::new(CheckPoint::new( let finalized_head = RwLock::new(CheckPoint::new(
genesis_block.clone(), genesis_block.clone(),
block_root, block_root,
@ -127,6 +134,7 @@ where
slot_clock, slot_clock,
attestation_aggregator, attestation_aggregator,
state: RwLock::new(genesis_state.clone()), state: RwLock::new(genesis_state.clone()),
cached_state,
finalized_head, finalized_head,
canonical_head, canonical_head,
spec, spec,
@ -253,6 +261,7 @@ where
/// Information is read from the present `beacon_state` shuffling, so only information from the /// Information is read from the present `beacon_state` shuffling, so only information from the
/// present and prior epoch is available. /// present and prior epoch is available.
pub fn block_proposer(&self, slot: Slot) -> Result<usize, BeaconStateError> { pub fn block_proposer(&self, slot: Slot) -> Result<usize, BeaconStateError> {
trace!("BeaconChain::block_proposer: slot: {}", slot);
let index = self let index = self
.state .state
.read() .read()
@ -274,8 +283,12 @@ where
&self, &self,
validator_index: usize, validator_index: usize,
) -> Result<Option<(Slot, u64)>, BeaconStateError> { ) -> Result<Option<(Slot, u64)>, BeaconStateError> {
trace!(
"BeaconChain::validator_attestion_slot_and_shard: validator_index: {}",
validator_index
);
if let Some((slot, shard, _committee)) = self if let Some((slot, shard, _committee)) = self
.state .cached_state
.read() .read()
.attestation_slot_and_shard_for_validator(validator_index, &self.spec)? .attestation_slot_and_shard_for_validator(validator_index, &self.spec)?
{ {
@ -287,6 +300,7 @@ where
/// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> { pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
trace!("BeaconChain::produce_attestation_data: shard: {}", shard);
let justified_epoch = self.justified_epoch(); let justified_epoch = self.justified_epoch();
let justified_block_root = *self let justified_block_root = *self
.state .state
@ -332,9 +346,7 @@ where
let aggregation_outcome = self let aggregation_outcome = self
.attestation_aggregator .attestation_aggregator
.write() .write()
.process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?; .process_free_attestation(&self.cached_state.read(), &free_attestation, &self.spec)?;
// TODO: Check this comment
//.map_err(|e| e.into())?;
// return if the attestation is invalid // return if the attestation is invalid
if !aggregation_outcome.valid { if !aggregation_outcome.valid {
@ -345,6 +357,7 @@ where
self.fork_choice.write().add_attestation( self.fork_choice.write().add_attestation(
free_attestation.validator_index, free_attestation.validator_index,
&free_attestation.data.beacon_block_root, &free_attestation.data.beacon_block_root,
&self.spec,
)?; )?;
Ok(aggregation_outcome) Ok(aggregation_outcome)
} }
@ -474,7 +487,9 @@ where
self.state_store.put(&state_root, &ssz_encode(&state)[..])?; self.state_store.put(&state_root, &ssz_encode(&state)[..])?;
// run the fork_choice add_block logic // run the fork_choice add_block logic
self.fork_choice.write().add_block(&block, &block_root)?; self.fork_choice
.write()
.add_block(&block, &block_root, &self.spec)?;
// If the parent block was the parent_block, automatically update the canonical head. // If the parent block was the parent_block, automatically update the canonical head.
// //
@ -489,6 +504,9 @@ where
); );
// Update the local state variable. // Update the local state variable.
*self.state.write() = state.clone(); *self.state.write() = state.clone();
// Update the cached state variable.
*self.cached_state.write() =
CachedBeaconState::from_beacon_state(state.clone(), self.spec.clone())?;
} }
Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed)) Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
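
(Sketch only, not part of the diff.) The duty-lookup API exposed by the chain is unchanged; it is simply served by the per-epoch cache that the hunk above rebuilds whenever the canonical state is replaced. In the sketch below, `chain` and `validator_index` are placeholders for a constructed `BeaconChain` and an active validator's index.

// Sketch only: `chain` and `validator_index` are placeholders.
if let Some((slot, shard)) = chain.validator_attestation_slot_and_shard(validator_index)? {
    // Same public signature as before; the shuffling now comes from `cached_state`.
    println!("validator {} attests at slot {} on shard {}", validator_index, slot, shard);
}
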
@ -537,9 +555,15 @@ where
}, },
}; };
state trace!("BeaconChain::produce_block: updating state for new block.",);
.per_block_processing_without_verifying_block_signature(&block, &self.spec)
.ok()?; let result =
state.per_block_processing_without_verifying_block_signature(&block, &self.spec);
trace!(
"BeaconNode::produce_block: state processing result: {:?}",
result
);
result.ok()?;
let state_root = state.canonical_root(); let state_root = state.canonical_root();
@ -554,7 +578,10 @@ where
pub fn fork_choice(&self) -> Result<(), Error> { pub fn fork_choice(&self) -> Result<(), Error> {
let present_head = self.finalized_head().beacon_block_root; let present_head = self.finalized_head().beacon_block_root;
let new_head = self.fork_choice.write().find_head(&present_head)?; let new_head = self
.fork_choice
.write()
.find_head(&present_head, &self.spec)?;
if new_head != present_head { if new_head != present_head {
let block = self let block = self

View File

@ -0,0 +1,150 @@
use log::{debug, trace};
use std::collections::HashMap;
use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Epoch, Slot};
pub const CACHE_PREVIOUS: bool = false;
pub const CACHE_CURRENT: bool = true;
pub const CACHE_NEXT: bool = false;
pub type CrosslinkCommittees = Vec<(Vec<usize>, u64)>;
pub type Shard = u64;
pub type CommitteeIndex = u64;
pub type AttestationDuty = (Slot, Shard, CommitteeIndex);
pub type AttestationDutyMap = HashMap<u64, AttestationDuty>;
// TODO: CachedBeaconState is presently duplicating `BeaconState` and `ChainSpec`. This is a
// massive memory waste, switch them to references.
pub struct CachedBeaconState {
pub state: BeaconState,
committees: Vec<Vec<CrosslinkCommittees>>,
attestation_duties: Vec<AttestationDutyMap>,
next_epoch: Epoch,
current_epoch: Epoch,
previous_epoch: Epoch,
spec: ChainSpec,
}
impl CachedBeaconState {
pub fn from_beacon_state(
state: BeaconState,
spec: ChainSpec,
) -> Result<Self, BeaconStateError> {
let current_epoch = state.current_epoch(&spec);
let previous_epoch = if current_epoch == spec.genesis_epoch {
current_epoch
} else {
current_epoch.saturating_sub(1_u64)
};
let next_epoch = state.next_epoch(&spec);
let mut committees: Vec<Vec<CrosslinkCommittees>> = Vec::with_capacity(3);
let mut attestation_duties: Vec<AttestationDutyMap> = Vec::with_capacity(3);
if CACHE_PREVIOUS {
debug!("from_beacon_state: building previous epoch cache.");
let cache = build_epoch_cache(&state, previous_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}
if CACHE_CURRENT {
debug!("from_beacon_state: building current epoch cache.");
let cache = build_epoch_cache(&state, current_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}
if CACHE_NEXT {
debug!("from_beacon_state: building next epoch cache.");
let cache = build_epoch_cache(&state, next_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}
Ok(Self {
state,
committees,
attestation_duties,
next_epoch,
current_epoch,
previous_epoch,
spec,
})
}
fn slot_to_cache_index(&self, slot: Slot) -> Option<usize> {
trace!("slot_to_cache_index: cache lookup");
match slot.epoch(self.spec.epoch_length) {
epoch if (epoch == self.previous_epoch) & CACHE_PREVIOUS => Some(0),
epoch if (epoch == self.current_epoch) & CACHE_CURRENT => Some(1),
epoch if (epoch == self.next_epoch) & CACHE_NEXT => Some(2),
_ => None,
}
}
/// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an
/// attestation.
///
/// Cached method.
///
/// Spec v0.2.0
pub fn attestation_slot_and_shard_for_validator(
&self,
validator_index: usize,
_spec: &ChainSpec,
) -> Result<Option<(Slot, u64, u64)>, BeaconStateError> {
// Get the result for this epoch.
let cache_index = self
.slot_to_cache_index(self.state.slot)
.expect("Current epoch should always have a cache index.");
let duties = self.attestation_duties[cache_index]
.get(&(validator_index as u64))
.and_then(|tuple| Some(*tuple));
Ok(duties)
}
}
struct EpochCacheResult {
committees: Vec<CrosslinkCommittees>,
attestation_duty_map: AttestationDutyMap,
}
fn build_epoch_cache(
state: &BeaconState,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<EpochCacheResult, BeaconStateError> {
let mut epoch_committees: Vec<CrosslinkCommittees> =
Vec::with_capacity(spec.epoch_length as usize);
let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
for slot in epoch.slot_iter(spec.epoch_length) {
let slot_committees = state.get_crosslink_committees_at_slot(slot, false, spec)?;
for (committee, shard) in slot_committees {
for (committee_index, validator_index) in committee.iter().enumerate() {
attestation_duty_map.insert(
*validator_index as u64,
(slot, shard, committee_index as u64),
);
}
}
epoch_committees.push(state.get_crosslink_committees_at_slot(slot, false, spec)?)
}
Ok(EpochCacheResult {
committees: epoch_committees,
attestation_duty_map,
})
}
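
(Sketch only, not part of the diff.) A minimal usage example of the new cache, where `state`, `spec` and `validator_index` are placeholders for a valid `BeaconState`, its `ChainSpec` and an active validator's index. With the flags above, only the current epoch is cached, so a lookup for a slot outside it would hit the `expect` in `attestation_slot_and_shard_for_validator`.

// Sketch only: build the per-epoch cache once, then answer duty lookups from it.
let cached = CachedBeaconState::from_beacon_state(state.clone(), spec.clone())?;
if let Some((slot, shard, committee_index)) =
    cached.attestation_slot_and_shard_for_validator(validator_index, &spec)?
{
    // The tuple is the validator's attestation duty for the cached (current) epoch.
    println!("attest at slot {}, shard {}, committee {}", slot, shard, committee_index);
}
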

View File

@ -1,7 +1,8 @@
mod attestation_aggregator; mod attestation_aggregator;
mod beacon_chain; mod beacon_chain;
mod cached_beacon_state;
mod checkpoint; mod checkpoint;
pub use self::beacon_chain::{BeaconChain, Error}; pub use self::beacon_chain::{BeaconChain, Error};
pub use self::checkpoint::CheckPoint; pub use self::checkpoint::CheckPoint;
pub use fork_choice::{ForkChoice, ForkChoiceAlgorithms, ForkChoiceError}; pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError};

View File

@ -14,7 +14,7 @@ criterion = "0.2"
[dependencies] [dependencies]
attester = { path = "../../../eth2/attester" } attester = { path = "../../../eth2/attester" }
beacon_chain = { path = "../../beacon_chain" } beacon_chain = { path = "../../beacon_chain" }
block_producer = { path = "../../../eth2/block_producer" } block_proposer = { path = "../../../eth2/block_proposer" }
bls = { path = "../../../eth2/utils/bls" } bls = { path = "../../../eth2/utils/bls" }
boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" } boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
db = { path = "../../db" } db = { path = "../../db" }

View File

@ -6,7 +6,7 @@ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB, MemoryDB,
}; };
use fork_choice::OptimisedLMDGhost; use fork_choice::BitwiseLMDGhost;
use log::debug; use log::debug;
use rayon::prelude::*; use rayon::prelude::*;
use slot_clock::TestingSlotClock; use slot_clock::TestingSlotClock;
@ -28,7 +28,7 @@ use types::{
/// is not useful for testing that multiple beacon nodes can reach consensus. /// is not useful for testing that multiple beacon nodes can reach consensus.
pub struct BeaconChainHarness { pub struct BeaconChainHarness {
pub db: Arc<MemoryDB>, pub db: Arc<MemoryDB>,
pub beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>, pub beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub block_store: Arc<BeaconBlockStore<MemoryDB>>, pub block_store: Arc<BeaconBlockStore<MemoryDB>>,
pub state_store: Arc<BeaconStateStore<MemoryDB>>, pub state_store: Arc<BeaconStateStore<MemoryDB>>,
pub validators: Vec<ValidatorHarness>, pub validators: Vec<ValidatorHarness>,
@ -46,7 +46,7 @@ impl BeaconChainHarness {
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past). let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past).
let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
let fork_choice = OptimisedLMDGhost::new(block_store.clone(), state_store.clone()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
let latest_eth1_data = Eth1Data { let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(), deposit_root: Hash256::zero(),
block_hash: Hash256::zero(), block_hash: Hash256::zero(),
@ -128,7 +128,18 @@ impl BeaconChainHarness {
pub fn increment_beacon_chain_slot(&mut self) -> Slot { pub fn increment_beacon_chain_slot(&mut self) -> Slot {
let slot = self.beacon_chain.present_slot() + 1; let slot = self.beacon_chain.present_slot() + 1;
debug!("Incrementing BeaconChain slot to {}.", slot); let nth_slot = slot
- slot
.epoch(self.spec.epoch_length)
.start_slot(self.spec.epoch_length);
let nth_epoch = slot.epoch(self.spec.epoch_length) - self.spec.genesis_epoch;
debug!(
"Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
slot,
slot.epoch(self.spec.epoch_length),
nth_epoch,
nth_slot
);
self.beacon_chain.slot_clock.set_slot(slot.as_u64()); self.beacon_chain.slot_clock.set_slot(slot.as_u64());
self.beacon_chain.advance_state(slot).unwrap(); self.beacon_chain.advance_state(slot).unwrap();
@ -209,6 +220,7 @@ impl BeaconChainHarness {
self.increment_beacon_chain_slot(); self.increment_beacon_chain_slot();
// Produce a new block. // Produce a new block.
debug!("Producing block...");
let block = self.produce_block(); let block = self.produce_block();
debug!("Submitting block for processing..."); debug!("Submitting block for processing...");
self.beacon_chain.process_block(block).unwrap(); self.beacon_chain.process_block(block).unwrap();
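
(Worked example, not part of the diff.) The arithmetic behind the new debug line, with illustrative numbers rather than the spec defaults:

// Illustrative values only: epoch_length = 8, genesis_epoch = 0.
let (epoch_length, genesis_epoch, slot) = (8u64, 0u64, 19u64);
let epoch = slot / epoch_length;            // 19 / 8 = 2
let nth_slot = slot - epoch * epoch_length; // 19 - 16 = 3  -> "slot 3 in epoch"
let nth_epoch = epoch - genesis_epoch;      // 2            -> "epoch height 2"
assert_eq!((epoch, nth_slot, nth_epoch), (2, 3, 2));
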

View File

@ -3,7 +3,7 @@ use attester::{
PublishOutcome as AttestationPublishOutcome, PublishOutcome as AttestationPublishOutcome,
}; };
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use block_producer::{ use block_proposer::{
BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError, BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError,
PublishOutcome as BlockPublishOutcome, PublishOutcome as BlockPublishOutcome,
}; };

View File

@ -2,7 +2,7 @@ use attester::{
DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError, DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError,
}; };
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use block_producer::{ use block_proposer::{
DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError, DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError,
}; };
use db::ClientDB; use db::ClientDB;

View File

@ -1,5 +1,5 @@
use attester::Signer as AttesterSigner; use attester::Signer as AttesterSigner;
use block_producer::Signer as BlockProposerSigner; use block_proposer::Signer as BlockProposerSigner;
use std::sync::RwLock; use std::sync::RwLock;
use types::{Keypair, Signature}; use types::{Keypair, Signature};

View File

@ -5,12 +5,12 @@ mod local_signer;
use attester::PollOutcome as AttestationPollOutcome; use attester::PollOutcome as AttestationPollOutcome;
use attester::{Attester, Error as AttestationPollError}; use attester::{Attester, Error as AttestationPollError};
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use block_producer::PollOutcome as BlockPollOutcome; use block_proposer::PollOutcome as BlockPollOutcome;
use block_producer::{BlockProducer, Error as BlockPollError}; use block_proposer::{BlockProducer, Error as BlockPollError};
use db::MemoryDB; use db::MemoryDB;
use direct_beacon_node::DirectBeaconNode; use direct_beacon_node::DirectBeaconNode;
use direct_duties::DirectDuties; use direct_duties::DirectDuties;
use fork_choice::OptimisedLMDGhost; use fork_choice::BitwiseLMDGhost;
use local_signer::LocalSigner; use local_signer::LocalSigner;
use slot_clock::TestingSlotClock; use slot_clock::TestingSlotClock;
use std::sync::Arc; use std::sync::Arc;
@ -36,20 +36,20 @@ pub enum AttestationProduceError {
pub struct ValidatorHarness { pub struct ValidatorHarness {
pub block_producer: BlockProducer< pub block_producer: BlockProducer<
TestingSlotClock, TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>, DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>, DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner, LocalSigner,
>, >,
pub attester: Attester< pub attester: Attester<
TestingSlotClock, TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>, DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>, DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner, LocalSigner,
>, >,
pub spec: Arc<ChainSpec>, pub spec: Arc<ChainSpec>,
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>, pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub keypair: Keypair, pub keypair: Keypair,
pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>, pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub slot_clock: Arc<TestingSlotClock>, pub slot_clock: Arc<TestingSlotClock>,
pub signer: Arc<LocalSigner>, pub signer: Arc<LocalSigner>,
} }
@ -61,7 +61,7 @@ impl ValidatorHarness {
/// A `BlockProducer` and an `Attester` are created. /// A `BlockProducer` and an `Attester` are created.
pub fn new( pub fn new(
keypair: Keypair, keypair: Keypair,
beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>, beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
spec: Arc<ChainSpec>, spec: Arc<ChainSpec>,
) -> Self { ) -> Self {
let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64())); let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));

View File

@ -1,19 +1,14 @@
use env_logger::{Builder, Env}; use env_logger::{Builder, Env};
use log::debug; use log::debug;
use test_harness::BeaconChainHarness; use test_harness::BeaconChainHarness;
use types::{ChainSpec, Slot}; use types::ChainSpec;
#[test] #[test]
#[ignore]
fn it_can_build_on_genesis_block() { fn it_can_build_on_genesis_block() {
let mut spec = ChainSpec::foundation(); Builder::from_env(Env::default().default_filter_or("info")).init();
spec.genesis_slot = Slot::new(spec.epoch_length * 8);
/* let spec = ChainSpec::few_validators();
spec.shard_count = spec.shard_count / 8; let validator_count = 8;
spec.target_committee_size = spec.target_committee_size / 8;
*/
let validator_count = 1000;
let mut harness = BeaconChainHarness::new(spec, validator_count as usize); let mut harness = BeaconChainHarness::new(spec, validator_count as usize);
@ -23,21 +18,22 @@ fn it_can_build_on_genesis_block() {
#[test] #[test]
#[ignore] #[ignore]
fn it_can_produce_past_first_epoch_boundary() { fn it_can_produce_past_first_epoch_boundary() {
Builder::from_env(Env::default().default_filter_or("debug")).init(); Builder::from_env(Env::default().default_filter_or("info")).init();
let validator_count = 100; let spec = ChainSpec::few_validators();
let validator_count = 8;
debug!("Starting harness build..."); debug!("Starting harness build...");
let mut harness = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); let mut harness = BeaconChainHarness::new(spec, validator_count);
debug!("Harness built, tests starting.."); debug!("Harness built, tests starting..");
let blocks = harness.spec.epoch_length * 3 + 1; let blocks = harness.spec.epoch_length * 2 + 1;
for i in 0..blocks { for i in 0..blocks {
harness.advance_chain_with_block(); harness.advance_chain_with_block();
debug!("Produced block {}/{}.", i, blocks); debug!("Produced block {}/{}.", i + 1, blocks);
} }
let dump = harness.chain_dump().expect("Chain dump failed."); let dump = harness.chain_dump().expect("Chain dump failed.");

View File

@ -14,7 +14,7 @@ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB, MemoryDB,
}; };
use fork_choice::optimised_lmd_ghost::OptimisedLMDGhost; use fork_choice::BitwiseLMDGhost;
use slog::{error, info, o, Drain}; use slog::{error, info, o, Drain};
use slot_clock::SystemTimeSlotClock; use slot_clock::SystemTimeSlotClock;
use std::sync::Arc; use std::sync::Arc;
@ -81,7 +81,7 @@ fn main() {
let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
.expect("Unable to load SystemTimeSlotClock"); .expect("Unable to load SystemTimeSlotClock");
// Choose the fork choice // Choose the fork choice
let fork_choice = OptimisedLMDGhost::new(block_store.clone(), state_store.clone()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
/* /*
* Generate some random data to start a chain with. * Generate some random data to start a chain with.

View File

@ -1,5 +1,5 @@
[package] [package]
name = "block_producer" name = "block_proposer"
version = "0.1.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"

View File

@ -236,7 +236,7 @@ mod tests {
epoch_map.map.insert(produce_epoch, produce_slot); epoch_map.map.insert(produce_epoch, produce_slot);
let epoch_map = Arc::new(epoch_map); let epoch_map = Arc::new(epoch_map);
let mut block_producer = BlockProducer::new( let mut block_proposer = BlockProducer::new(
spec.clone(), spec.clone(),
epoch_map.clone(), epoch_map.clone(),
slot_clock.clone(), slot_clock.clone(),
@ -251,28 +251,28 @@ mod tests {
// One slot before production slot... // One slot before production slot...
slot_clock.set_slot(produce_slot.as_u64() - 1); slot_clock.set_slot(produce_slot.as_u64() - 1);
assert_eq!( assert_eq!(
block_producer.poll(), block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1)) Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1))
); );
// On the produce slot... // On the produce slot...
slot_clock.set_slot(produce_slot.as_u64()); slot_clock.set_slot(produce_slot.as_u64());
assert_eq!( assert_eq!(
block_producer.poll(), block_proposer.poll(),
Ok(PollOutcome::BlockProduced(produce_slot.into())) Ok(PollOutcome::BlockProduced(produce_slot.into()))
); );
// Trying the same produce slot again... // Trying the same produce slot again...
slot_clock.set_slot(produce_slot.as_u64()); slot_clock.set_slot(produce_slot.as_u64());
assert_eq!( assert_eq!(
block_producer.poll(), block_proposer.poll(),
Ok(PollOutcome::SlotAlreadyProcessed(produce_slot)) Ok(PollOutcome::SlotAlreadyProcessed(produce_slot))
); );
// One slot after the produce slot... // One slot after the produce slot...
slot_clock.set_slot(produce_slot.as_u64() + 1); slot_clock.set_slot(produce_slot.as_u64() + 1);
assert_eq!( assert_eq!(
block_producer.poll(), block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1)) Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1))
); );
@ -280,7 +280,7 @@ mod tests {
let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length; let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length;
slot_clock.set_slot(slot); slot_clock.set_slot(slot);
assert_eq!( assert_eq!(
block_producer.poll(), block_proposer.poll(),
Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot))) Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot)))
); );
} }

View File

@ -9,10 +9,13 @@ db = { path = "../../beacon_node/db" }
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
types = { path = "../types" } types = { path = "../types" }
fast-math = "0.1.1" fast-math = "0.1.1"
byteorder = "1.3.1" log = "0.4.6"
bit-vec = "0.5.0"
[dev-dependencies] [dev-dependencies]
hex = "0.3.2"
yaml-rust = "0.4.2" yaml-rust = "0.4.2"
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }
slot_clock = { path = "../utils/slot_clock" } slot_clock = { path = "../utils/slot_clock" }
beacon_chain = { path = "../../beacon_node/beacon_chain" } beacon_chain = { path = "../../beacon_node/beacon_chain" }
env_logger = "0.6.0"

View File

@ -1,49 +1,25 @@
// Copyright 2019 Sigma Prime Pty Ltd. extern crate bit_vec;
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
extern crate byteorder;
extern crate fast_math; extern crate fast_math;
use crate::{ForkChoice, ForkChoiceError}; use crate::{ForkChoice, ForkChoiceError};
use byteorder::{BigEndian, ByteOrder}; use bit_vec::BitVec;
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, ClientDB,
}; };
use fast_math::log2_raw; use fast_math::log2_raw;
use log::{debug, trace};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock,
Hash256, Slot, SlotHeight, ChainSpec, Hash256, Slot, SlotHeight,
}; };
//TODO: Pruning - Children //TODO: Pruning - Children
//TODO: Handle Syncing //TODO: Handle Syncing
//TODO: Sort out global constants /// The optimised bitwise LMD-GHOST fork choice rule.
const GENESIS_SLOT: u64 = 0;
const FORK_CHOICE_BALANCE_INCREMENT: u64 = 1e9 as u64;
const MAX_DEPOSIT_AMOUNT: u64 = 32e9 as u64;
const EPOCH_LENGTH: u64 = 64;
/// The optimised LMD-GHOST fork choice rule.
/// NOTE: This uses u32 to represent difference between block heights. Thus this is only /// NOTE: This uses u32 to represent difference between block heights. Thus this is only
/// applicable for block height differences in the range of a u32. /// applicable for block height differences in the range of a u32.
/// This can potentially be parallelized in some parts. /// This can potentially be parallelized in some parts.
@ -51,6 +27,13 @@ const EPOCH_LENGTH: u64 = 64;
// the comparison. Log2_raw takes 2ns according to the documentation. // the comparison. Log2_raw takes 2ns according to the documentation.
#[inline] #[inline]
fn log2_int(x: u32) -> u32 { fn log2_int(x: u32) -> u32 {
if x == 0 {
return 0;
}
assert!(
x <= std::f32::MAX as u32,
"Height too large for fast log in bitwise fork choice"
);
log2_raw(x as f32) as u32 log2_raw(x as f32) as u32
} }
@ -58,8 +41,8 @@ fn power_of_2_below(x: u32) -> u32 {
2u32.pow(log2_int(x)) 2u32.pow(log2_int(x))
} }
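
(Illustration only, not part of the diff.) `power_of_2_below` rounds a height difference down to the nearest power of two, which the skip lookup further down uses as its step size; the diff's version goes through the approximate `fast_math::log2_raw`. An exact integer equivalent, for intuition:

// Illustration only: exact "largest power of two not exceeding x".
fn power_of_2_below_exact(x: u32) -> u32 {
    if x == 0 {
        return 1; // matches 2^log2_int(0) now that log2_int guards against zero
    }
    1u32 << (31 - x.leading_zeros())
}
// e.g. power_of_2_below_exact(48) == 32, power_of_2_below_exact(64) == 64.
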
/// Stores the necessary data structures to run the optimised lmd ghost algorithm. /// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm.
pub struct OptimisedLMDGhost<T: ClientDB + Sized> { pub struct BitwiseLMDGhost<T: ClientDB + Sized> {
/// A cache of known ancestors at given heights for a specific block. /// A cache of known ancestors at given heights for a specific block.
//TODO: Consider FnvHashMap //TODO: Consider FnvHashMap
cache: HashMap<CacheKey<u32>, Hash256>, cache: HashMap<CacheKey<u32>, Hash256>,
@ -78,7 +61,7 @@ pub struct OptimisedLMDGhost<T: ClientDB + Sized> {
max_known_height: SlotHeight, max_known_height: SlotHeight,
} }
impl<T> OptimisedLMDGhost<T> impl<T> BitwiseLMDGhost<T>
where where
T: ClientDB + Sized, T: ClientDB + Sized,
{ {
@ -86,7 +69,7 @@ where
block_store: Arc<BeaconBlockStore<T>>, block_store: Arc<BeaconBlockStore<T>>,
state_store: Arc<BeaconStateStore<T>>, state_store: Arc<BeaconStateStore<T>>,
) -> Self { ) -> Self {
OptimisedLMDGhost { BitwiseLMDGhost {
cache: HashMap::new(), cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16], ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(), latest_attestation_targets: HashMap::new(),
@ -103,6 +86,7 @@ where
&self, &self,
state_root: &Hash256, state_root: &Hash256,
block_slot: Slot, block_slot: Slot,
spec: &ChainSpec,
) -> Result<HashMap<Hash256, u64>, ForkChoiceError> { ) -> Result<HashMap<Hash256, u64>, ForkChoiceError> {
// get latest votes // get latest votes
// Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) // // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) //
@ -117,25 +101,31 @@ where
let active_validator_indices = get_active_validator_indices( let active_validator_indices = get_active_validator_indices(
&current_state.validator_registry[..], &current_state.validator_registry[..],
block_slot.epoch(EPOCH_LENGTH), block_slot.epoch(spec.epoch_length),
); );
for index in active_validator_indices { for index in active_validator_indices {
let balance = let balance = std::cmp::min(
std::cmp::min(current_state.validator_balances[index], MAX_DEPOSIT_AMOUNT) current_state.validator_balances[index],
/ FORK_CHOICE_BALANCE_INCREMENT; spec.max_deposit_amount,
) / spec.fork_choice_balance_increment;
if balance > 0 { if balance > 0 {
if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) {
*latest_votes.entry(*target).or_insert_with(|| 0) += balance; *latest_votes.entry(*target).or_insert_with(|| 0) += balance;
} }
} }
} }
trace!("Latest votes: {:?}", latest_votes);
Ok(latest_votes) Ok(latest_votes)
} }
/// Gets the ancestor at a given height `at_height` of a block specified by `block_hash`. /// Gets the ancestor at a given height `at_height` of a block specified by `block_hash`.
fn get_ancestor(&mut self, block_hash: Hash256, at_height: SlotHeight) -> Option<Hash256> { fn get_ancestor(
&mut self,
block_hash: Hash256,
target_height: SlotHeight,
spec: &ChainSpec,
) -> Option<Hash256> {
// return None if we can't get the block from the db. // return None if we can't get the block from the db.
let block_height = { let block_height = {
let block_slot = self let block_slot = self
@ -145,32 +135,31 @@ where
.expect("Should have returned already if None") .expect("Should have returned already if None")
.slot; .slot;
block_slot.height(Slot::from(GENESIS_SLOT)) block_slot.height(spec.genesis_slot)
}; };
// verify we haven't exceeded the block height // verify we haven't exceeded the block height
if at_height >= block_height { if target_height >= block_height {
if at_height > block_height { if target_height > block_height {
return None; return None;
} else { } else {
return Some(block_hash); return Some(block_hash);
} }
} }
// check if the result is stored in our cache // check if the result is stored in our cache
let cache_key = CacheKey::new(&block_hash, at_height.as_u32()); let cache_key = CacheKey::new(&block_hash, target_height.as_u32());
if let Some(ancestor) = self.cache.get(&cache_key) { if let Some(ancestor) = self.cache.get(&cache_key) {
return Some(*ancestor); return Some(*ancestor);
} }
// not in the cache recursively search for ancestors using a log-lookup // not in the cache recursively search for ancestors using a log-lookup
if let Some(ancestor) = { if let Some(ancestor) = {
let ancestor_lookup = self.ancestors let ancestor_lookup = self.ancestors
[log2_int((block_height - at_height - 1u64).as_u32()) as usize] [log2_int((block_height - target_height - 1u64).as_u32()) as usize]
.get(&block_hash) .get(&block_hash)
//TODO: Panic if we can't lookup and fork choice fails //TODO: Panic if we can't lookup and fork choice fails
.expect("All blocks should be added to the ancestor log lookup table"); .expect("All blocks should be added to the ancestor log lookup table");
self.get_ancestor(*ancestor_lookup, at_height) self.get_ancestor(*ancestor_lookup, target_height, &spec)
} { } {
// add the result to the cache // add the result to the cache
self.cache.insert(cache_key, ancestor); self.cache.insert(cache_key, ancestor);
@ -185,15 +174,17 @@ where
&mut self, &mut self,
latest_votes: &HashMap<Hash256, u64>, latest_votes: &HashMap<Hash256, u64>,
block_height: SlotHeight, block_height: SlotHeight,
spec: &ChainSpec,
) -> Option<Hash256> { ) -> Option<Hash256> {
// map of vote counts for every hash at this height // map of vote counts for every hash at this height
let mut current_votes: HashMap<Hash256, u64> = HashMap::new(); let mut current_votes: HashMap<Hash256, u64> = HashMap::new();
let mut total_vote_count = 0; let mut total_vote_count = 0;
trace!("Clear winner at block height: {}", block_height);
// loop through the latest votes and count all votes // loop through the latest votes and count all votes
// these have already been weighted by balance // these have already been weighted by balance
for (hash, votes) in latest_votes.iter() { for (hash, votes) in latest_votes.iter() {
if let Some(ancestor) = self.get_ancestor(*hash, block_height) { if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) {
let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0);
current_votes.insert(ancestor, current_vote_value + *votes); current_votes.insert(ancestor, current_vote_value + *votes);
total_vote_count += votes; total_vote_count += votes;
@ -210,54 +201,62 @@ where
None None
} }
// Finds the best child, splitting children into a binary tree, based on their hashes // Finds the best child, splitting children into a binary tree, based on their hashes (Bitwise
// LMD Ghost)
fn choose_best_child(&self, votes: &HashMap<Hash256, u64>) -> Option<Hash256> { fn choose_best_child(&self, votes: &HashMap<Hash256, u64>) -> Option<Hash256> {
let mut bitmask = 0; if votes.is_empty() {
for bit in (0..=255).rev() { return None;
}
let mut bitmask: BitVec = BitVec::new();
// loop through all bits
for bit in 0..=256 {
let mut zero_votes = 0; let mut zero_votes = 0;
let mut one_votes = 0; let mut one_votes = 0;
let mut single_candidate = None; let mut single_candidate = (None, false);
trace!("Child vote length: {}", votes.len());
for (candidate, votes) in votes.iter() { for (candidate, votes) in votes.iter() {
let candidate_uint = BigEndian::read_u32(candidate); let candidate_bit: BitVec = BitVec::from_bytes(&candidate);
if candidate_uint >> (bit + 1) != bitmask {
// if the bitmasks don't match, exclude candidate
if !bitmask.iter().eq(candidate_bit.iter().take(bit)) {
trace!(
"Child: {} was removed in bit: {} with the bitmask: {:?}",
candidate,
bit,
bitmask
);
continue; continue;
} }
if (candidate_uint >> bit) % 2 == 0 { if candidate_bit.get(bit) == Some(false) {
zero_votes += votes; zero_votes += votes;
} else { } else {
one_votes += votes; one_votes += votes;
} }
if single_candidate.is_none() { if single_candidate.0.is_none() {
single_candidate = Some(candidate); single_candidate.0 = Some(candidate);
single_candidate.1 = true;
} else { } else {
single_candidate = None; single_candidate.1 = false;
} }
} }
bitmask = (bitmask * 2) + { bitmask.push(one_votes > zero_votes);
if one_votes > zero_votes { if single_candidate.1 {
1 return Some(*single_candidate.0.expect("Cannot reach this"));
} else {
0
} }
};
if let Some(candidate) = single_candidate {
return Some(*candidate);
}
//TODO Remove this during benchmark after testing
assert!(bit >= 1);
} }
// should never reach here // should never reach here
None None
} }
} }
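
(Illustration only, not part of the diff.) The renamed rule's core is the bit-by-bit walk in `choose_best_child` above: at each bit position it keeps whichever bit value carries more balance-weighted votes, drops candidates whose leading bits no longer match, and stops as soon as a single candidate survives. The standalone sketch below shows the same walk on single-byte ids instead of `Hash256` keys:

use std::collections::HashMap;

// Sketch only: mirrors the bitwise majority walk above on 8-bit ids.
fn choose_best_child_sketch(votes: &HashMap<u8, u64>) -> Option<u8> {
    if votes.is_empty() {
        return None;
    }
    let mut prefix: Vec<bool> = Vec::new();
    for bit in 0..8 {
        let (mut zero_votes, mut one_votes) = (0u64, 0u64);
        let mut survivors: Vec<u8> = Vec::new();
        for (candidate, weight) in votes {
            // Most-significant bit first, matching BitVec::from_bytes ordering.
            let bits: Vec<bool> = (0..8).rev().map(|i| (*candidate >> i) & 1 == 1).collect();
            // Drop candidates whose leading bits already disagree with the chosen prefix.
            if !bits.starts_with(&prefix) {
                continue;
            }
            if bits[bit] {
                one_votes += *weight;
            } else {
                zero_votes += *weight;
            }
            survivors.push(*candidate);
        }
        // A lone compatible candidate wins outright.
        if survivors.len() == 1 {
            return Some(survivors[0]);
        }
        // Otherwise keep whichever bit value carries more balance-weighted votes.
        prefix.push(one_votes > zero_votes);
    }
    None
}
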
impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> { impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
fn add_block( fn add_block(
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,
block_hash: &Hash256, block_hash: &Hash256,
spec: &ChainSpec,
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// get the height of the parent // get the height of the parent
let parent_height = self let parent_height = self
@ -265,7 +264,7 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
.get_deserialized(&block.parent_root)? .get_deserialized(&block.parent_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
let parent_hash = &block.parent_root; let parent_hash = &block.parent_root;
@ -295,22 +294,29 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
&mut self, &mut self,
validator_index: u64, validator_index: u64,
target_block_root: &Hash256, target_block_root: &Hash256,
spec: &ChainSpec,
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// simply add the attestation to the latest_attestation_target if the block_height is // simply add the attestation to the latest_attestation_target if the block_height is
// larger // larger
trace!(
"Adding attestation of validator: {:?} for block: {}",
validator_index,
target_block_root
);
let attestation_target = self let attestation_target = self
.latest_attestation_targets .latest_attestation_targets
.entry(validator_index) .entry(validator_index)
.or_insert_with(|| *target_block_root); .or_insert_with(|| *target_block_root);
// if we already have a value // if we already have a value
if attestation_target != target_block_root { if attestation_target != target_block_root {
trace!("Old attestation found: {:?}", attestation_target);
// get the height of the target block // get the height of the target block
let block_height = self let block_height = self
.block_store .block_store
.get_deserialized(&target_block_root)? .get_deserialized(&target_block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
// get the height of the past target block // get the height of the past target block
let past_block_height = self let past_block_height = self
@ -318,9 +324,10 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
.get_deserialized(&attestation_target)? .get_deserialized(&attestation_target)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
// update the attestation only if the new target is higher // update the attestation only if the new target is higher
if past_block_height < block_height { if past_block_height < block_height {
trace!("Updating old attestation");
*attestation_target = *target_block_root; *attestation_target = *target_block_root;
} }
} }
@ -328,25 +335,39 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
} }
/// Perform lmd_ghost on the current chain to find the head. /// Perform lmd_ghost on the current chain to find the head.
fn find_head(&mut self, justified_block_start: &Hash256) -> Result<Hash256, ForkChoiceError> { fn find_head(
&mut self,
justified_block_start: &Hash256,
spec: &ChainSpec,
) -> Result<Hash256, ForkChoiceError> {
debug!(
"Starting optimised fork choice at block: {}",
justified_block_start
);
let block = self let block = self
.block_store .block_store
.get_deserialized(&justified_block_start)? .get_deserialized(&justified_block_start)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?;
let block_slot = block.slot(); let block_slot = block.slot();
let block_height = block_slot.height(Slot::from(GENESIS_SLOT));
let state_root = block.state_root(); let state_root = block.state_root();
let mut block_height = block_slot.height(spec.genesis_slot);
let mut current_head = *justified_block_start; let mut current_head = *justified_block_start;
let mut latest_votes = self.get_latest_votes(&state_root, block_slot)?; let mut latest_votes = self.get_latest_votes(&state_root, block_slot, spec)?;
// remove any votes that don't relate to our current head. // remove any votes that don't relate to our current head.
latest_votes.retain(|hash, _| self.get_ancestor(*hash, block_height) == Some(current_head)); latest_votes
.retain(|hash, _| self.get_ancestor(*hash, block_height, spec) == Some(current_head));
// begin searching for the head // begin searching for the head
loop { loop {
debug!(
"Iteration for block: {} with vote length: {}",
current_head,
latest_votes.len()
);
// if there are no children, we are done, return the current_head // if there are no children, we are done, return the current_head
let children = match self.children.get(&current_head) { let children = match self.children.get(&current_head) {
Some(children) => children.clone(), Some(children) => children.clone(),
@ -358,9 +379,11 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
let mut step = let mut step =
power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u32()) / 2; power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u32()) / 2;
while step > 0 { while step > 0 {
trace!("Current Step: {}", step);
if let Some(clear_winner) = self.get_clear_winner( if let Some(clear_winner) = self.get_clear_winner(
&latest_votes, &latest_votes,
block_height - (block_height % u64::from(step)) + u64::from(step), block_height - (block_height % u64::from(step)) + u64::from(step),
spec,
) { ) {
current_head = clear_winner; current_head = clear_winner;
break; break;
@ -368,17 +391,23 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
step /= 2; step /= 2;
} }
if step > 0 { if step > 0 {
trace!("Found clear winner in log lookup");
} }
// if our skip lookup failed and we only have one child, progress to that child // if our skip lookup failed and we only have one child, progress to that child
else if children.len() == 1 { else if children.len() == 1 {
current_head = children[0]; current_head = children[0];
trace!(
"Lookup failed, only one child, proceeding to child: {}",
current_head
);
} }
// we need to find the best child path to progress down. // we need to find the best child path to progress down.
else { else {
trace!("Searching for best child");
let mut child_votes = HashMap::new(); let mut child_votes = HashMap::new();
for (voted_hash, vote) in latest_votes.iter() { for (voted_hash, vote) in latest_votes.iter() {
// if the latest votes correspond to a child // if the latest votes correspond to a child
if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1) { if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1, spec) {
// add up the votes for each child // add up the votes for each child
*child_votes.entry(child).or_insert_with(|| 0) += vote; *child_votes.entry(child).or_insert_with(|| 0) += vote;
} }
@ -387,22 +416,30 @@ impl<T: ClientDB + Sized> ForkChoice for OptimisedLMDGhost<T> {
current_head = self current_head = self
.choose_best_child(&child_votes) .choose_best_child(&child_votes)
.ok_or(ForkChoiceError::CannotFindBestChild)?; .ok_or(ForkChoiceError::CannotFindBestChild)?;
trace!("Best child found: {}", current_head);
} }
// No head was found, re-iterate // didn't find head yet, proceed to next iteration
// update block height
// update the block height for the next iteration block_height = self
let block_height = self
.block_store .block_store
.get_deserialized(&current_head)? .get_deserialized(&current_head)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
// prune the latest votes for votes that are not part of current chosen chain // prune the latest votes for votes that are not part of current chosen chain
// more specifically, only keep votes that have head as an ancestor // more specifically, only keep votes that have head as an ancestor
latest_votes for hash in latest_votes.keys() {
.retain(|hash, _| self.get_ancestor(*hash, block_height) == Some(current_head)); trace!(
"Ancestor for vote: {} at height: {} is: {:?}",
hash,
block_height,
self.get_ancestor(*hash, block_height, spec)
);
}
latest_votes.retain(|hash, _| {
self.get_ancestor(*hash, block_height, spec) == Some(current_head)
});
} }
} }
} }

View File

@ -1,57 +1,36 @@
// Copyright 2019 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This crate stores the various implementations of fork-choice rules that can be used for the //! This crate stores the various implementations of fork-choice rules that can be used for the
//! beacon blockchain. //! beacon blockchain.
//! //!
//! There are four implementations. One is the naive longest chain rule (primarily for testing //! There are three implementations. One is the naive longest chain rule (primarily for testing
//! purposes). The other three are proposed implementations of the LMD-GHOST fork-choice rule with various forms of optimisation. //! purposes). The other two are proposed implementations of the LMD-GHOST fork-choice rule with various forms of optimisation.
//! //!
//! The current implementations are: //! The current implementations are:
//! - [`longest-chain`]: Simplistic longest-chain fork choice - primarily for testing, **not for //! - [`longest-chain`]: Simplistic longest-chain fork choice - primarily for testing, **not for
//! production**. //! production**.
//! - [`slow_lmd_ghost`]: This is a simple and very inefficient implementation given in the ethereum 2.0 //! - [`slow_lmd_ghost`]: This is a simple and very inefficient implementation given in the ethereum 2.0
//! specifications (https://github.com/ethereum/eth2.0-specs/blob/v0.1/specs/core/0_beacon-chain.md#get_block_root). //! specifications (https://github.com/ethereum/eth2.0-specs/blob/v0.1/specs/core/0_beacon-chain.md#get_block_root).
//! - [`optimised_lmd_ghost`]: This is an optimised version of the naive implementation as proposed //! - [`bitwise_lmd_ghost`]: This is an optimised version of bitwise LMD-GHOST as proposed
//! by Vitalik. The reference implementation can be found at: https://github.com/ethereum/research/blob/master/ghost/ghost.py //! by Vitalik. The reference implementation can be found at: https://github.com/ethereum/research/blob/master/ghost/ghost.py
//! - [`protolambda_lmd_ghost`]: Another optimised version of LMD-GHOST designed by @protolambda.
//! The go implementation can be found here: https://github.com/protolambda/lmd-ghost.
//! //!
//! [`longest-chain`]: struct.LongestChain.html
//! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html //! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html
//! [`optimised_lmd_ghost`]: struct.OptimisedLmdGhost.html //! [`bitwise_lmd_ghost`]: struct.BitwiseLMDGhost.html
//! [`protolambda_lmd_ghost`]: struct.ProtolambdaLmdGhost.html
extern crate db; extern crate db;
extern crate ssz; extern crate ssz;
extern crate types; extern crate types;
pub mod bitwise_lmd_ghost;
pub mod longest_chain; pub mod longest_chain;
pub mod optimised_lmd_ghost;
pub mod slow_lmd_ghost; pub mod slow_lmd_ghost;
use db::stores::BeaconBlockAtSlotError; use db::stores::BeaconBlockAtSlotError;
use db::DBError; use db::DBError;
use types::{BeaconBlock, Hash256}; use types::{BeaconBlock, ChainSpec, Hash256};
pub use bitwise_lmd_ghost::BitwiseLMDGhost;
pub use longest_chain::LongestChain; pub use longest_chain::LongestChain;
pub use optimised_lmd_ghost::OptimisedLMDGhost; pub use slow_lmd_ghost::SlowLMDGhost;
/// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures
/// which can be built in block processing through the `add_block` and `add_attestation` functions. /// which can be built in block processing through the `add_block` and `add_attestation` functions.
@ -63,6 +42,7 @@ pub trait ForkChoice: Send + Sync {
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,
block_hash: &Hash256, block_hash: &Hash256,
spec: &ChainSpec,
) -> Result<(), ForkChoiceError>; ) -> Result<(), ForkChoiceError>;
/// Called when an attestation has been added. Allows generic attestation-level data structures to be built for a given fork choice. /// Called when an attestation has been added. Allows generic attestation-level data structures to be built for a given fork choice.
// This can be generalised to a full attestation if required later. // This can be generalised to a full attestation if required later.
@ -70,10 +50,15 @@ pub trait ForkChoice: Send + Sync {
&mut self, &mut self,
validator_index: u64, validator_index: u64,
target_block_hash: &Hash256, target_block_hash: &Hash256,
spec: &ChainSpec,
) -> Result<(), ForkChoiceError>; ) -> Result<(), ForkChoiceError>;
/// The fork-choice algorithm to find the current canonical head of the chain. /// The fork-choice algorithm to find the current canonical head of the chain.
// TODO: Remove the justified_start_block parameter and make it internal // TODO: Remove the justified_start_block parameter and make it internal
fn find_head(&mut self, justified_start_block: &Hash256) -> Result<Hash256, ForkChoiceError>; fn find_head(
&mut self,
justified_start_block: &Hash256,
spec: &ChainSpec,
) -> Result<Hash256, ForkChoiceError>;
} }
/// Possible fork choice errors that can occur. /// Possible fork choice errors that can occur.
@ -109,11 +94,11 @@ impl From<BeaconBlockAtSlotError> for ForkChoiceError {
} }
/// Fork choice options that are currently implemented. /// Fork choice options that are currently implemented.
pub enum ForkChoiceAlgorithms { pub enum ForkChoiceAlgorithm {
/// Chooses the longest chain to be the head. Not for production. /// Chooses the longest chain to be the head. Not for production.
LongestChain, LongestChain,
/// A simple and highly inefficient implementation of LMD ghost. /// A simple and highly inefficient implementation of LMD ghost.
SlowLMDGhost, SlowLMDGhost,
/// An optimised version of LMD-GHOST by Vitalik. /// An optimised version of bitwise LMD-GHOST by Vitalik.
OptimisedLMDGhost, BitwiseLMDGhost,
} }
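
(Sketch only, not part of the diff.) Pulling the trait change together: every `ForkChoice` method now threads a `&ChainSpec`, so values such as the genesis slot, epoch length and deposit amounts come from the spec instead of the crate-local constants removed elsewhere in this commit. All bindings below (`block_store`, `state_store`, `spec`, `block`, `block_root`, `validator_index`, `justified_root`) are placeholders assumed to be in scope.

// Sketch only: a caller driving the renamed BitwiseLMDGhost through the updated trait.
let mut fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
fork_choice.add_block(&block, &block_root, &spec)?;
fork_choice.add_attestation(validator_index, &block_root, &spec)?;
let head = fork_choice.find_head(&justified_root, &spec)?;
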

View File

@ -1,7 +1,7 @@
use crate::{ForkChoice, ForkChoiceError}; use crate::{ForkChoice, ForkChoiceError};
use db::{stores::BeaconBlockStore, ClientDB}; use db::{stores::BeaconBlockStore, ClientDB};
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, Hash256, Slot}; use types::{BeaconBlock, ChainSpec, Hash256, Slot};
pub struct LongestChain<T> pub struct LongestChain<T>
where where
@ -30,6 +30,7 @@ impl<T: ClientDB + Sized> ForkChoice for LongestChain<T> {
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,
block_hash: &Hash256, block_hash: &Hash256,
_: &ChainSpec,
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// add the block hash to head_block_hashes removing the parent if it exists // add the block hash to head_block_hashes removing the parent if it exists
self.head_block_hashes self.head_block_hashes
@ -38,12 +39,17 @@ impl<T: ClientDB + Sized> ForkChoice for LongestChain<T> {
Ok(()) Ok(())
} }
fn add_attestation(&mut self, _: u64, _: &Hash256) -> Result<(), ForkChoiceError> { fn add_attestation(
&mut self,
_: u64,
_: &Hash256,
_: &ChainSpec,
) -> Result<(), ForkChoiceError> {
// do nothing // do nothing
Ok(()) Ok(())
} }
fn find_head(&mut self, _: &Hash256) -> Result<Hash256, ForkChoiceError> { fn find_head(&mut self, _: &Hash256, _: &ChainSpec) -> Result<Hash256, ForkChoiceError> {
let mut head_blocks: Vec<(usize, BeaconBlock)> = vec![]; let mut head_blocks: Vec<(usize, BeaconBlock)> = vec![];
/* /*
* Load all the head_block hashes from the DB as SszBeaconBlocks. * Load all the head_block hashes from the DB as SszBeaconBlocks.

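Once the head blocks are deserialized, the longest-chain rule reduces to picking the candidate with the greatest slot. A hedged sketch of just that selection, over in-memory (Slot, Hash256) pairs standing in for the loaded blocks; this is an illustration, not the crate's DB-backed code.

// Illustration only: the longest-chain head is the candidate with the greatest
// slot; ties are broken arbitrarily here.
fn longest_chain_head(heads: &[(Slot, Hash256)]) -> Option<Hash256> {
    heads
        .iter()
        .max_by_key(|(slot, _)| slot.as_u64())
        .map(|(_, hash)| *hash)
}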
View File

@ -1,23 +1,3 @@
// Copyright 2019 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
extern crate db; extern crate db;
use crate::{ForkChoice, ForkChoiceError}; use crate::{ForkChoice, ForkChoiceError};
@ -25,21 +5,16 @@ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
ClientDB, ClientDB,
}; };
use log::{debug, trace};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock,
Hash256, Slot, ChainSpec, Hash256, Slot,
}; };
//TODO: Pruning and syncing //TODO: Pruning and syncing
//TODO: Sort out global constants
const GENESIS_SLOT: u64 = 0;
const FORK_CHOICE_BALANCE_INCREMENT: u64 = 1e9 as u64;
const MAX_DEPOSIT_AMOUNT: u64 = 32e9 as u64;
const EPOCH_LENGTH: u64 = 64;
pub struct SlowLMDGhost<T: ClientDB + Sized> { pub struct SlowLMDGhost<T: ClientDB + Sized> {
/// The latest attestation targets as a map of validator index to block hash. /// The latest attestation targets as a map of validator index to block hash.
//TODO: Could this be a fixed size vec //TODO: Could this be a fixed size vec
@ -56,12 +31,15 @@ impl<T> SlowLMDGhost<T>
where where
T: ClientDB + Sized, T: ClientDB + Sized,
{ {
pub fn new(block_store: BeaconBlockStore<T>, state_store: BeaconStateStore<T>) -> Self { pub fn new(
block_store: Arc<BeaconBlockStore<T>>,
state_store: Arc<BeaconStateStore<T>>,
) -> Self {
SlowLMDGhost { SlowLMDGhost {
latest_attestation_targets: HashMap::new(), latest_attestation_targets: HashMap::new(),
children: HashMap::new(), children: HashMap::new(),
block_store: Arc::new(block_store), block_store,
state_store: Arc::new(state_store), state_store,
} }
} }
@ -71,6 +49,7 @@ where
&self, &self,
state_root: &Hash256, state_root: &Hash256,
block_slot: Slot, block_slot: Slot,
spec: &ChainSpec,
) -> Result<HashMap<Hash256, u64>, ForkChoiceError> { ) -> Result<HashMap<Hash256, u64>, ForkChoiceError> {
// get latest votes // get latest votes
// Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) // // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) //
@ -84,21 +63,22 @@ where
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
let active_validator_indices = get_active_validator_indices( let active_validator_indices = get_active_validator_indices(
&current_state.validator_registry, &current_state.validator_registry[..],
block_slot.epoch(EPOCH_LENGTH), block_slot.epoch(spec.epoch_length),
); );
for index in active_validator_indices { for index in active_validator_indices {
let balance = let balance = std::cmp::min(
std::cmp::min(current_state.validator_balances[index], MAX_DEPOSIT_AMOUNT) current_state.validator_balances[index],
/ FORK_CHOICE_BALANCE_INCREMENT; spec.max_deposit_amount,
) / spec.fork_choice_balance_increment;
if balance > 0 { if balance > 0 {
if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) {
*latest_votes.entry(*target).or_insert_with(|| 0) += balance; *latest_votes.entry(*target).or_insert_with(|| 0) += balance;
} }
} }
} }
trace!("Latest votes: {:?}", latest_votes);
Ok(latest_votes) Ok(latest_votes)
} }
@ -117,12 +97,12 @@ where
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))?
.slot(); .slot();
for (target_hash, votes) in latest_votes.iter() { for (vote_hash, votes) in latest_votes.iter() {
let (root_at_slot, _) = self let (root_at_slot, _) = self
.block_store .block_store
.block_at_slot(&block_root, block_slot)? .block_at_slot(&vote_hash, block_slot)?
.ok_or(ForkChoiceError::MissingBeaconBlock(*block_root))?; .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))?;
if root_at_slot == *target_hash { if root_at_slot == *block_root {
count += votes; count += votes;
} }
} }
@ -136,6 +116,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,
block_hash: &Hash256, block_hash: &Hash256,
_: &ChainSpec,
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// build the children hashmap // build the children hashmap
// add the new block to the children of parent // add the new block to the children of parent
@ -153,22 +134,29 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
&mut self, &mut self,
validator_index: u64, validator_index: u64,
target_block_root: &Hash256, target_block_root: &Hash256,
spec: &ChainSpec,
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// simply add the attestation to the latest_attestation_target if the block_height is // simply add the attestation to the latest_attestation_target if the block_height is
// larger // larger
trace!(
"Adding attestation of validator: {:?} for block: {}",
validator_index,
target_block_root
);
let attestation_target = self let attestation_target = self
.latest_attestation_targets .latest_attestation_targets
.entry(validator_index) .entry(validator_index)
.or_insert_with(|| *target_block_root); .or_insert_with(|| *target_block_root);
// if we already have a value // if we already have a value
if attestation_target != target_block_root { if attestation_target != target_block_root {
trace!("Old attestation found: {:?}", attestation_target);
// get the height of the target block // get the height of the target block
let block_height = self let block_height = self
.block_store .block_store
.get_deserialized(&target_block_root)? .get_deserialized(&target_block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
// get the height of the past target block // get the height of the past target block
let past_block_height = self let past_block_height = self
@ -176,9 +164,10 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
.get_deserialized(&attestation_target)? .get_deserialized(&attestation_target)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))?
.slot() .slot()
.height(Slot::from(GENESIS_SLOT)); .height(spec.genesis_slot);
// update the attestation only if the new target is higher // update the attestation only if the new target is higher
if past_block_height < block_height { if past_block_height < block_height {
trace!("Updating old attestation");
*attestation_target = *target_block_root; *attestation_target = *target_block_root;
} }
} }
@ -186,7 +175,12 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
} }
/// A very inefficient implementation of LMD ghost. /// A very inefficient implementation of LMD ghost.
fn find_head(&mut self, justified_block_start: &Hash256) -> Result<Hash256, ForkChoiceError> { fn find_head(
&mut self,
justified_block_start: &Hash256,
spec: &ChainSpec,
) -> Result<Hash256, ForkChoiceError> {
debug!("Running LMD Ghost Fork-choice rule");
let start = self let start = self
.block_store .block_store
.get_deserialized(&justified_block_start)? .get_deserialized(&justified_block_start)?
@ -194,12 +188,12 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
let start_state_root = start.state_root(); let start_state_root = start.state_root();
let latest_votes = self.get_latest_votes(&start_state_root, start.slot())?; let latest_votes = self.get_latest_votes(&start_state_root, start.slot(), spec)?;
let mut head_hash = Hash256::zero(); let mut head_hash = *justified_block_start;
loop { loop {
let mut head_vote_count = 0; debug!("Iteration for block: {}", head_hash);
let children = match self.children.get(&head_hash) { let children = match self.children.get(&head_hash) {
Some(children) => children, Some(children) => children,
@ -207,8 +201,18 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
None => break, None => break,
}; };
// if we only have one child, use it
if children.len() == 1 {
trace!("Single child found.");
head_hash = children[0];
continue;
}
trace!("Children found: {:?}", children);
let mut head_vote_count = 0;
for child_hash in children { for child_hash in children {
let vote_count = self.get_vote_count(&latest_votes, &child_hash)?; let vote_count = self.get_vote_count(&latest_votes, &child_hash)?;
trace!("Vote count for child: {} is: {}", child_hash, vote_count);
if vote_count > head_vote_count { if vote_count > head_vote_count {
head_hash = *child_hash; head_hash = *child_hash;

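The find_head loop above walks from the justified block towards the leaves, at each step descending into the child that carries the most latest-attestation weight. A self-contained sketch of that walk over toy data, with block roots replaced by string ids and get_vote_count replaced by a direct weight lookup (the real code resolves each vote through block_at_slot); ties are broken arbitrarily here. The data in main mirrors the first test case of the lmd_ghost vectors later in this diff.

use std::collections::HashMap;

// Illustration of the GHOST walk, not the crate's SlowLMDGhost code.
fn ghost_head(
    justified: &'static str,
    children: &HashMap<&'static str, Vec<&'static str>>,
    weight: &HashMap<&'static str, u64>,
) -> &'static str {
    let mut head = justified;
    loop {
        let kids = match children.get(head) {
            Some(kids) if !kids.is_empty() => kids,
            _ => break, // no children: we have reached a head
        };
        // Descend into the child with the greatest attestation weight.
        head = *kids
            .iter()
            .max_by_key(|k| weight.get(*k).copied().unwrap_or(0))
            .unwrap();
    }
    head
}

fn main() {
    // Tree and weights of the first lmd_ghost test case: b0 -> b1 -> {b2, b3}.
    let mut children = HashMap::new();
    children.insert("b0", vec!["b1"]);
    children.insert("b1", vec!["b2", "b3"]);
    let mut weight = HashMap::new();
    weight.insert("b2", 5);
    weight.insert("b3", 10);
    assert_eq!(ghost_head("b0", &children, &weight), "b3");
}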
View File

@ -0,0 +1,37 @@
title: Fork-choice Tests
summary: A collection of abstract fork-choice tests for bitwise lmd ghost.
test_suite: Fork-Choice
test_cases:
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b1'
- id: 'b3'
parent: 'b1'
weights:
- b0: 0
- b1: 0
- b2: 5
- b3: 10
heads:
- id: 'b3'
# bitwise LMD ghost example. bitwise GHOST gives b2
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
weights:
- b1: 5
- b2: 4
- b3: 3
heads:
- id: 'b2'

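The second test case above is the one that separates the two GHOST variants: plain LMD-GHOST takes the single heaviest child (b1, weight 5), while the bitwise variant compares the candidates' block-root bits group by group, so the combined weight of b2 and b3 (4 + 3 = 7) first eliminates b1, and b2 then beats b3 on the final bit. Below is a toy, hedged reproduction of that selection, assuming the bits are compared from most- to least-significant and that ties favour the one-bits; the single-byte "roots" mirror how the test harness later in this diff right-aligns each hex id into the last byte of a Hash256.

// Toy reproduction of the bit-by-bit selection; not the crate's BitwiseLMDGhost.
// Candidates are (last byte of the block root, attestation weight).
fn main() {
    let mut candidates: Vec<(u8, u64)> = vec![(0xb1, 5), (0xb2, 4), (0xb3, 3)];
    for bit in (0..8).rev() {
        let ones: u64 = candidates
            .iter()
            .filter(|(root, _)| (root >> bit) & 1 == 1)
            .map(|(_, weight)| weight)
            .sum();
        let zeros: u64 = candidates
            .iter()
            .filter(|(root, _)| (root >> bit) & 1 == 0)
            .map(|(_, weight)| weight)
            .sum();
        // Keep the heavier bit-group; the tie-break direction is an assumption.
        let keep = if ones >= zeros { 1 } else { 0 };
        candidates.retain(|(root, _)| (root >> bit) & 1 == keep);
        if candidates.len() == 1 {
            break;
        }
    }
    // 0xb1 = ..0001, 0xb2 = ..0010, 0xb3 = ..0011: bit 1 keeps {b2, b3} (7 > 5),
    // bit 0 then keeps b2 (4 > 3).
    assert_eq!(candidates[0].0, 0xb2);
}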
View File

@ -0,0 +1,37 @@
title: Fork-choice Tests
summary: A collection of abstract fork-choice tests for lmd ghost.
test_suite: Fork-Choice
test_cases:
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b1'
- id: 'b3'
parent: 'b1'
weights:
- b0: 0
- b1: 0
- b2: 5
- b3: 10
heads:
- id: 'b3'
# same scenario as the bitwise LMD ghost example; plain GHOST gives b1
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
weights:
- b1: 5
- b2: 4
- b3: 3
heads:
- id: 'b1'

View File

@ -0,0 +1,51 @@
title: Fork-choice Tests
summary: A collection of abstract fork-choice tests to verify the longest chain fork-choice rule.
test_suite: Fork-Choice
test_cases:
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b1'
- id: 'b3'
parent: 'b1'
- id: 'b4'
parent: 'b3'
weights:
- b0: 0
- b1: 0
- b2: 10
- b3: 1
heads:
- id: 'b4'
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b1'
- id: 'b3'
parent: 'b2'
- id: 'b4'
parent: 'b3'
- id: 'b5'
parent: 'b0'
- id: 'b6'
parent: 'b5'
- id: 'b7'
parent: 'b6'
- id: 'b8'
parent: 'b7'
- id: 'b9'
parent: 'b8'
weights:
- b0: 5
- b1: 20
- b2: 10
- b3: 10
heads:
- id: 'b9'

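In both of these vectors the expected head simply sits on the longest branch, regardless of where the attestation weight lands: b4 wins the first case even though b2 carries a weight of 10, and b9 wins the second even though the weight is concentrated on the b1..b3 branch. That is exactly what separates the longest-chain rule from the GHOST variants above, and why it is flagged as not for production.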
View File

@ -0,0 +1,281 @@
// Tests the available fork-choice algorithms
extern crate beacon_chain;
extern crate bls;
extern crate db;
//extern crate env_logger; // for debugging
extern crate fork_choice;
extern crate hex;
extern crate log;
extern crate slot_clock;
extern crate types;
extern crate yaml_rust;
pub use beacon_chain::BeaconChain;
use bls::{PublicKey, Signature};
use db::stores::{BeaconBlockStore, BeaconStateStore};
use db::MemoryDB;
//use env_logger::{Builder, Env};
use fork_choice::{BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost};
use ssz::ssz_encode;
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs::File, io::prelude::*, path::PathBuf};
use types::{
BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Epoch, Eth1Data, Hash256, Slot, Validator,
};
use yaml_rust::yaml;
// Note: we assume the block IDs are hex-encoded.
#[test]
fn test_bitwise_lmd_ghost() {
// set up logging
//Builder::from_env(Env::default().default_filter_or("trace")).init();
test_yaml_vectors(
ForkChoiceAlgorithm::BitwiseLMDGhost,
"tests/bitwise_lmd_ghost_test_vectors.yaml",
100,
);
}
#[test]
fn test_slow_lmd_ghost() {
test_yaml_vectors(
ForkChoiceAlgorithm::SlowLMDGhost,
"tests/lmd_ghost_test_vectors.yaml",
100,
);
}
#[test]
fn test_longest_chain() {
test_yaml_vectors(
ForkChoiceAlgorithm::LongestChain,
"tests/longest_chain_test_vectors.yaml",
100,
);
}
// run a generic test over given YAML test vectors
fn test_yaml_vectors(
fork_choice_algo: ForkChoiceAlgorithm,
yaml_file_path: &str,
emulated_validators: usize, // the number of validators used to give weights.
) {
// load test cases from yaml
let test_cases = load_test_cases_from_yaml(yaml_file_path);
// default vars
let spec = ChainSpec::foundation();
let zero_hash = Hash256::zero();
let eth1_data = Eth1Data {
deposit_root: zero_hash.clone(),
block_hash: zero_hash.clone(),
};
let randao_reveal = Signature::empty_signature();
let signature = Signature::empty_signature();
let body = BeaconBlockBody {
proposer_slashings: vec![],
attester_slashings: vec![],
attestations: vec![],
deposits: vec![],
exits: vec![],
};
// process the tests
for test_case in test_cases {
// setup a fresh test
let (mut fork_choice, block_store, state_root) =
setup_inital_state(&fork_choice_algo, emulated_validators);
// keep a hashmap of block ids to block hashes (random hashes that stand in for the block id)
//let mut block_id_map: HashMap<String, Hash256> = HashMap::new();
// keep a list of hash to slot
let mut block_slot: HashMap<Hash256, Slot> = HashMap::new();
// assume the block tree is given to us in order.
let mut genesis_hash = None;
for block in test_case["blocks"].clone().into_vec().unwrap() {
let block_id = block["id"].as_str().unwrap().to_string();
let parent_id = block["parent"].as_str().unwrap().to_string();
// default params for genesis
let block_hash = id_to_hash(&block_id);
let mut slot = spec.genesis_slot;
let parent_root = id_to_hash(&parent_id);
// set the slot and parent based on the YAML, starting with genesis;
// if this is not the genesis block, update the slot
if parent_id != block_id {
// find parent slot
slot = *(block_slot
.get(&parent_root)
.expect("Parent should have a slot number"))
+ 1;
} else {
genesis_hash = Some(block_hash);
}
// update slot mapping
block_slot.insert(block_hash, slot);
// build the BeaconBlock
let beacon_block = BeaconBlock {
slot,
parent_root,
state_root: state_root.clone(),
randao_reveal: randao_reveal.clone(),
eth1_data: eth1_data.clone(),
signature: signature.clone(),
body: body.clone(),
};
// Store the block.
block_store
.put(&block_hash, &ssz_encode(&beacon_block)[..])
.unwrap();
// run add block for fork choice if not genesis
if parent_id != block_id {
fork_choice
.add_block(&beacon_block, &block_hash, &spec)
.unwrap();
}
}
// add the weights (attestations)
let mut current_validator = 0;
for id_map in test_case["weights"].clone().into_vec().unwrap() {
// get the block id and weights
for (map_id, map_weight) in id_map.as_hash().unwrap().iter() {
let id = map_id.as_str().unwrap();
let block_root = id_to_hash(&id.to_string());
let weight = map_weight.as_i64().unwrap();
// we assume each validator has a weight of 1 and add one attestation per unit to reach the
// correct weight
for _ in 0..weight {
assert!(
current_validator <= emulated_validators,
"Not enough validators to emulate weights"
);
fork_choice
.add_attestation(current_validator as u64, &block_root, &spec)
.unwrap();
current_validator += 1;
}
}
}
// everything is set up, run the fork choice, using genesis as the head
let head = fork_choice
.find_head(&genesis_hash.unwrap(), &spec)
.unwrap();
// compare the result to the expected heads
let success = test_case["heads"]
.clone()
.into_vec()
.unwrap()
.iter()
.find(|heads| id_to_hash(&heads["id"].as_str().unwrap().to_string()) == head)
.is_some();
println!("Head found: {}", head);
assert!(success, "Did not find one of the possible heads");
}
}
// loads the test_cases from the supplied yaml file
fn load_test_cases_from_yaml(file_path: &str) -> Vec<yaml_rust::Yaml> {
// load the yaml
let mut file = {
let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
file_path_buf.push(file_path);
File::open(file_path_buf).unwrap()
};
let mut yaml_str = String::new();
file.read_to_string(&mut yaml_str).unwrap();
let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap();
let doc = &docs[0];
doc["test_cases"].as_vec().unwrap().clone()
}
// initialise a state whose registry holds `no_validators` copies of a default validator. All blocks will reference this state root.
fn setup_inital_state(
fork_choice_algo: &ForkChoiceAlgorithm,
no_validators: usize,
) -> (Box<ForkChoice>, Arc<BeaconBlockStore<MemoryDB>>, Hash256) {
let zero_hash = Hash256::zero();
let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
// the fork choice instantiation
let fork_choice: Box<ForkChoice> = match fork_choice_algo {
ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new(
block_store.clone(),
state_store.clone(),
)),
ForkChoiceAlgorithm::SlowLMDGhost => {
Box::new(SlowLMDGhost::new(block_store.clone(), state_store.clone()))
}
ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())),
};
// misc vars for setting up the state
let genesis_time = 1_550_381_159;
let latest_eth1_data = Eth1Data {
deposit_root: zero_hash.clone(),
block_hash: zero_hash.clone(),
};
let initial_validator_deposits = vec![];
let spec = ChainSpec::foundation();
// create the state
let mut state = BeaconState::genesis(
genesis_time,
initial_validator_deposits,
latest_eth1_data,
&spec,
)
.unwrap();
let default_validator = Validator {
pubkey: PublicKey::default(),
withdrawal_credentials: zero_hash,
activation_epoch: Epoch::from(0u64),
exit_epoch: spec.far_future_epoch,
withdrawal_epoch: spec.far_future_epoch,
penalized_epoch: spec.far_future_epoch,
status_flags: None,
};
// activate the validators
for _ in 0..no_validators {
state.validator_registry.push(default_validator.clone());
state.validator_balances.push(32_000_000_000);
}
let state_root = state.canonical_root();
state_store
.put(&state_root, &ssz_encode(&state)[..])
.unwrap();
// return initialised vars
(fork_choice, block_store, state_root)
}
// convert a block_id into a Hash256 -- assumes the input is hex-encoded
fn id_to_hash(id: &String) -> Hash256 {
let bytes = hex::decode(id).expect("Block ID should be hex");
let len = std::cmp::min(bytes.len(), 32);
let mut fixed_bytes = [0u8; 32];
for (index, byte) in bytes.iter().take(32).enumerate() {
fixed_bytes[32 - len + index] = *byte;
}
Hash256::from(fixed_bytes)
}

View File

@ -1,7 +1,7 @@
use crate::SlotProcessingError; use crate::SlotProcessingError;
use hashing::hash; use hashing::hash;
use int_to_bytes::int_to_bytes32; use int_to_bytes::int_to_bytes32;
use log::debug; use log::{debug, trace};
use ssz::{ssz_encode, TreeHash}; use ssz::{ssz_encode, TreeHash};
use types::{ use types::{
beacon_state::{AttestationParticipantsError, BeaconStateError}, beacon_state::{AttestationParticipantsError, BeaconStateError},
@ -219,6 +219,8 @@ fn per_block_processing_signature_optional(
Error::MaxAttestationsExceeded Error::MaxAttestationsExceeded
); );
debug!("Verifying {} attestations.", block.body.attestations.len());
for attestation in &block.body.attestations { for attestation in &block.body.attestations {
validate_attestation(&state, attestation, spec)?; validate_attestation(&state, attestation, spec)?;
@ -231,11 +233,6 @@ fn per_block_processing_signature_optional(
state.latest_attestations.push(pending_attestation); state.latest_attestations.push(pending_attestation);
} }
debug!(
"{} attestations verified & processed.",
block.body.attestations.len()
);
/* /*
* Deposits * Deposits
*/ */
@ -312,6 +309,10 @@ fn validate_attestation_signature_optional(
spec: &ChainSpec, spec: &ChainSpec,
verify_signature: bool, verify_signature: bool,
) -> Result<(), AttestationValidationError> { ) -> Result<(), AttestationValidationError> {
trace!(
"validate_attestation_signature_optional: attestation epoch: {}",
attestation.data.slot.epoch(spec.epoch_length)
);
ensure!( ensure!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
AttestationValidationError::IncludedTooEarly AttestationValidationError::IncludedTooEarly

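The IncludedTooEarly check above gates an attestation on min_attestation_inclusion_delay slots passing before it may appear in a block. A small illustration with an assumed delay of four slots (the real value comes from the ChainSpec, not from this diff):

// Illustration only; the delay is an assumed value, not taken from this commit.
fn includable(attestation_slot: u64, state_slot: u64) -> bool {
    let min_attestation_inclusion_delay = 4;
    attestation_slot + min_attestation_inclusion_delay <= state_slot
}

fn main() {
    assert!(!includable(100, 103)); // rejected as IncludedTooEarly
    assert!(includable(100, 104)); // earliest state slot at which it can be included
}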
View File

@ -144,8 +144,10 @@ impl EpochProcessable for BeaconState {
let previous_epoch_attester_indices = let previous_epoch_attester_indices =
self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?; self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?;
let previous_total_balance = let previous_total_balance = self.get_total_balance(
self.get_total_balance(&previous_epoch_attester_indices[..], spec); &get_active_validator_indices(&self.validator_registry, previous_epoch),
spec,
);
/* /*
* Validators targetting the previous justified slot * Validators targetting the previous justified slot
@ -315,6 +317,11 @@ impl EpochProcessable for BeaconState {
// for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot { // for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) { for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
trace!(
"Finding winning root for slot: {} (epoch: {})",
slot,
slot.epoch(spec.epoch_length)
);
let crosslink_committees_at_slot = let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, false, spec)?; self.get_crosslink_committees_at_slot(slot, false, spec)?;
@ -352,7 +359,8 @@ impl EpochProcessable for BeaconState {
/* /*
* Rewards and Penalities * Rewards and Penalities
*/ */
let base_reward_quotient = previous_total_balance.integer_sqrt(); let base_reward_quotient =
previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
if base_reward_quotient == 0 { if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero); return Err(Error::BaseRewardQuotientIsZero);
} }
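The corrected quotient divides the integer square root of the previous total balance by the spec's base_reward_quotient instead of using the square root directly. As a rough, purely illustrative calculation: with 1,024 active validators at 32e9 Gwei each, previous_total_balance is 32,768 × 10^9 Gwei, whose integer square root is 5,724,334; with an assumed base_reward_quotient of 1,024 that gives a quotient of about 5,590, and the zero-guard above only fires when the total balance is vanishingly small.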
@ -539,6 +547,12 @@ impl EpochProcessable for BeaconState {
*/ */
self.previous_calculation_epoch = self.current_calculation_epoch; self.previous_calculation_epoch = self.current_calculation_epoch;
self.previous_epoch_start_shard = self.current_epoch_start_shard; self.previous_epoch_start_shard = self.current_epoch_start_shard;
debug!(
"setting previous_epoch_seed to : {}",
self.current_epoch_seed
);
self.previous_epoch_seed = self.current_epoch_seed; self.previous_epoch_seed = self.current_epoch_seed;
let should_update_validator_registy = if self.finalized_epoch let should_update_validator_registy = if self.finalized_epoch
@ -553,6 +567,7 @@ impl EpochProcessable for BeaconState {
}; };
if should_update_validator_registy { if should_update_validator_registy {
trace!("updating validator registry.");
self.update_validator_registry(spec); self.update_validator_registry(spec);
self.current_calculation_epoch = next_epoch; self.current_calculation_epoch = next_epoch;
@ -561,6 +576,7 @@ impl EpochProcessable for BeaconState {
% spec.shard_count; % spec.shard_count;
self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)? self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)?
} else { } else {
trace!("not updating validator registry.");
let epochs_since_last_registry_update = let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch; current_epoch - self.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1) if (epochs_since_last_registry_update > 1)

View File

@ -18,7 +18,8 @@ serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
slog = "^2.2.3" slog = "^2.2.3"
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
fisher_yates_shuffle = { path = "../utils/fisher_yates_shuffle" } ssz_derive = { path = "../utils/ssz_derive" }
swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" }
[dev-dependencies] [dev-dependencies]
env_logger = "0.6.0" env_logger = "0.6.0"

View File

@ -2,9 +2,10 @@ use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, H
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, Clone, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)]
pub struct Attestation { pub struct Attestation {
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData, pub data: AttestationData,
@ -33,39 +34,13 @@ impl Attestation {
} }
} }
impl Encodable for Attestation {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.aggregation_bitfield);
s.append(&self.data);
s.append(&self.custody_bitfield);
s.append(&self.aggregate_signature);
}
}
impl Decodable for Attestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (aggregation_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (data, i) = AttestationData::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (aggregate_signature, i) = AggregateSignature::ssz_decode(bytes, i)?;
let attestation_record = Self {
aggregation_bitfield,
data,
custody_bitfield,
aggregate_signature,
};
Ok((attestation_record, i))
}
}
impl TreeHash for Attestation { impl TreeHash for Attestation {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.aggregation_bitfield.hash_tree_root()); result.append(&mut self.aggregation_bitfield.hash_tree_root_internal());
result.append(&mut self.data.hash_tree_root()); result.append(&mut self.data.hash_tree_root_internal());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.custody_bitfield.hash_tree_root_internal());
result.append(&mut self.aggregate_signature.hash_tree_root()); result.append(&mut self.aggregate_signature.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -85,7 +60,7 @@ impl<T: RngCore> TestRandom<T> for Attestation {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -99,11 +74,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Attestation::random_for_test(&mut rng); let original = Attestation::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

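The same change repeats across the type files below: the hand-written Encodable and Decodable impls are deleted in favour of #[derive(Encode, Decode)] from ssz_derive, while TreeHash stays hand-written. A hedged sketch of the round trip the updated tests exercise, over a made-up two-field struct (the struct and its fields are illustrative only):

use ssz::{ssz_encode, Decodable};
use ssz_derive::{Decode, Encode};

// Hypothetical struct, not part of this commit.
#[derive(Debug, PartialEq, Encode, Decode)]
struct Example {
    slot: u64,
    shard: u64,
}

fn round_trip() {
    let original = Example { slot: 42, shard: 7 };
    let bytes = ssz_encode(&original);
    // ssz_decode returns the decoded value and the index of the next byte.
    let (decoded, _next_index) = Example::ssz_decode(&bytes, 0).unwrap();
    assert_eq!(original, decoded);
}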
View File

@ -2,7 +2,8 @@ use crate::test_utils::TestRandom;
use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot}; use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
pub const SSZ_ATTESTION_DATA_LENGTH: usize = { pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
8 + // slot 8 + // slot
@ -15,7 +16,7 @@ pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
32 // justified_block_root 32 // justified_block_root
}; };
#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)]
pub struct AttestationData { pub struct AttestationData {
pub slot: Slot, pub slot: Slot,
pub shard: u64, pub shard: u64,
@ -43,55 +44,17 @@ impl AttestationData {
} }
} }
impl Encodable for AttestationData {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.shard);
s.append(&self.beacon_block_root);
s.append(&self.epoch_boundary_root);
s.append(&self.shard_block_root);
s.append(&self.latest_crosslink);
s.append(&self.justified_epoch);
s.append(&self.justified_block_root);
}
}
impl Decodable for AttestationData {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (shard, i) = <_>::ssz_decode(bytes, i)?;
let (beacon_block_root, i) = <_>::ssz_decode(bytes, i)?;
let (epoch_boundary_root, i) = <_>::ssz_decode(bytes, i)?;
let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
let (latest_crosslink, i) = <_>::ssz_decode(bytes, i)?;
let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justified_block_root, i) = <_>::ssz_decode(bytes, i)?;
let attestation_data = AttestationData {
slot,
shard,
beacon_block_root,
epoch_boundary_root,
shard_block_root,
latest_crosslink,
justified_epoch,
justified_block_root,
};
Ok((attestation_data, i))
}
}
impl TreeHash for AttestationData { impl TreeHash for AttestationData {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
result.append(&mut self.shard.hash_tree_root()); result.append(&mut self.shard.hash_tree_root_internal());
result.append(&mut self.beacon_block_root.hash_tree_root()); result.append(&mut self.beacon_block_root.hash_tree_root_internal());
result.append(&mut self.epoch_boundary_root.hash_tree_root()); result.append(&mut self.epoch_boundary_root.hash_tree_root_internal());
result.append(&mut self.shard_block_root.hash_tree_root()); result.append(&mut self.shard_block_root.hash_tree_root_internal());
result.append(&mut self.latest_crosslink.hash_tree_root()); result.append(&mut self.latest_crosslink.hash_tree_root_internal());
result.append(&mut self.justified_epoch.hash_tree_root()); result.append(&mut self.justified_epoch.hash_tree_root_internal());
result.append(&mut self.justified_block_root.hash_tree_root()); result.append(&mut self.justified_block_root.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -115,7 +78,7 @@ impl<T: RngCore> TestRandom<T> for AttestationData {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -129,11 +92,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationData::random_for_test(&mut rng); let original = AttestationData::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,38 +2,21 @@ use super::AttestationData;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::TreeHash;
use ssz_derive::{Decode, Encode};
#[derive(Debug, Clone, PartialEq, Default, Serialize)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)]
pub struct AttestationDataAndCustodyBit { pub struct AttestationDataAndCustodyBit {
pub data: AttestationData, pub data: AttestationData,
pub custody_bit: bool, pub custody_bit: bool,
} }
impl Encodable for AttestationDataAndCustodyBit {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.data);
// TODO: deal with bools
}
}
impl Decodable for AttestationDataAndCustodyBit {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (data, i) = <_>::ssz_decode(bytes, i)?;
let custody_bit = false;
let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { data, custody_bit };
Ok((attestation_data_and_custody_bit, i))
}
}
impl TreeHash for AttestationDataAndCustodyBit { impl TreeHash for AttestationDataAndCustodyBit {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.data.hash_tree_root()); result.append(&mut self.data.hash_tree_root_internal());
// TODO: add bool ssz // TODO: add bool ssz
// result.append(custody_bit.hash_tree_root()); // result.append(custody_bit.hash_tree_root_internal());
ssz::hash(&result) ssz::hash(&result)
} }
} }
@ -52,7 +35,7 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
mod test { mod test {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -68,11 +51,11 @@ mod test {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,41 +1,20 @@
use crate::{test_utils::TestRandom, SlashableAttestation}; use crate::{test_utils::TestRandom, SlashableAttestation};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct AttesterSlashing { pub struct AttesterSlashing {
pub slashable_attestation_1: SlashableAttestation, pub slashable_attestation_1: SlashableAttestation,
pub slashable_attestation_2: SlashableAttestation, pub slashable_attestation_2: SlashableAttestation,
} }
impl Encodable for AttesterSlashing {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slashable_attestation_1);
s.append(&self.slashable_attestation_2);
}
}
impl Decodable for AttesterSlashing {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slashable_attestation_1, i) = <_>::ssz_decode(bytes, i)?;
let (slashable_attestation_2, i) = <_>::ssz_decode(bytes, i)?;
Ok((
AttesterSlashing {
slashable_attestation_1,
slashable_attestation_2,
},
i,
))
}
}
impl TreeHash for AttesterSlashing { impl TreeHash for AttesterSlashing {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slashable_attestation_1.hash_tree_root()); result.append(&mut self.slashable_attestation_1.hash_tree_root_internal());
result.append(&mut self.slashable_attestation_2.hash_tree_root()); result.append(&mut self.slashable_attestation_2.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -53,7 +32,7 @@ impl<T: RngCore> TestRandom<T> for AttesterSlashing {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -67,11 +46,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng); let original = AttesterSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -3,9 +3,10 @@ use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData, S
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct BeaconBlock { pub struct BeaconBlock {
pub slot: Slot, pub slot: Slot,
pub parent_root: Hash256, pub parent_root: Hash256,
@ -59,53 +60,16 @@ impl BeaconBlock {
} }
} }
impl Encodable for BeaconBlock {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.parent_root);
s.append(&self.state_root);
s.append(&self.randao_reveal);
s.append(&self.eth1_data);
s.append(&self.signature);
s.append(&self.body);
}
}
impl Decodable for BeaconBlock {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (parent_root, i) = <_>::ssz_decode(bytes, i)?;
let (state_root, i) = <_>::ssz_decode(bytes, i)?;
let (randao_reveal, i) = <_>::ssz_decode(bytes, i)?;
let (eth1_data, i) = <_>::ssz_decode(bytes, i)?;
let (signature, i) = <_>::ssz_decode(bytes, i)?;
let (body, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
slot,
parent_root,
state_root,
randao_reveal,
eth1_data,
signature,
body,
},
i,
))
}
}
impl TreeHash for BeaconBlock { impl TreeHash for BeaconBlock {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
result.append(&mut self.parent_root.hash_tree_root()); result.append(&mut self.parent_root.hash_tree_root_internal());
result.append(&mut self.state_root.hash_tree_root()); result.append(&mut self.state_root.hash_tree_root_internal());
result.append(&mut self.randao_reveal.hash_tree_root()); result.append(&mut self.randao_reveal.hash_tree_root_internal());
result.append(&mut self.eth1_data.hash_tree_root()); result.append(&mut self.eth1_data.hash_tree_root_internal());
result.append(&mut self.signature.hash_tree_root()); result.append(&mut self.signature.hash_tree_root_internal());
result.append(&mut self.body.hash_tree_root()); result.append(&mut self.body.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -128,7 +92,7 @@ impl<T: RngCore> TestRandom<T> for BeaconBlock {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -142,11 +106,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlock::random_for_test(&mut rng); let original = BeaconBlock::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,9 +2,10 @@ use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
pub struct BeaconBlockBody { pub struct BeaconBlockBody {
pub proposer_slashings: Vec<ProposerSlashing>, pub proposer_slashings: Vec<ProposerSlashing>,
pub attester_slashings: Vec<AttesterSlashing>, pub attester_slashings: Vec<AttesterSlashing>,
@ -13,45 +14,14 @@ pub struct BeaconBlockBody {
pub exits: Vec<Exit>, pub exits: Vec<Exit>,
} }
impl Encodable for BeaconBlockBody {
fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.proposer_slashings);
s.append_vec(&self.attester_slashings);
s.append_vec(&self.attestations);
s.append_vec(&self.deposits);
s.append_vec(&self.exits);
}
}
impl Decodable for BeaconBlockBody {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (proposer_slashings, i) = <_>::ssz_decode(bytes, i)?;
let (attester_slashings, i) = <_>::ssz_decode(bytes, i)?;
let (attestations, i) = <_>::ssz_decode(bytes, i)?;
let (deposits, i) = <_>::ssz_decode(bytes, i)?;
let (exits, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
proposer_slashings,
attester_slashings,
attestations,
deposits,
exits,
},
i,
))
}
}
impl TreeHash for BeaconBlockBody { impl TreeHash for BeaconBlockBody {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.proposer_slashings.hash_tree_root()); result.append(&mut self.proposer_slashings.hash_tree_root_internal());
result.append(&mut self.attester_slashings.hash_tree_root()); result.append(&mut self.attester_slashings.hash_tree_root_internal());
result.append(&mut self.attestations.hash_tree_root()); result.append(&mut self.attestations.hash_tree_root_internal());
result.append(&mut self.deposits.hash_tree_root()); result.append(&mut self.deposits.hash_tree_root_internal());
result.append(&mut self.exits.hash_tree_root()); result.append(&mut self.exits.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -72,7 +42,7 @@ impl<T: RngCore> TestRandom<T> for BeaconBlockBody {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -86,11 +56,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlockBody::random_for_test(&mut rng); let original = BeaconBlockBody::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -5,15 +5,20 @@ use crate::{
PendingAttestation, PublicKey, Signature, Slot, Validator, PendingAttestation, PublicKey, Signature, Slot, Validator,
}; };
use bls::verify_proof_of_possession; use bls::verify_proof_of_possession;
use fisher_yates_shuffle::shuffle;
use honey_badger_split::SplitExt; use honey_badger_split::SplitExt;
use log::trace;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
use swap_or_not_shuffle::get_permutated_index;
mod tests;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum BeaconStateError { pub enum BeaconStateError {
EpochOutOfBounds, EpochOutOfBounds,
UnableToShuffle,
InsufficientRandaoMixes, InsufficientRandaoMixes,
InsufficientValidators, InsufficientValidators,
InsufficientBlockRoots, InsufficientBlockRoots,
@ -47,7 +52,7 @@ macro_rules! safe_sub_assign {
}; };
} }
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
pub struct BeaconState { pub struct BeaconState {
// Misc // Misc
pub slot: Slot, pub slot: Slot,
@ -201,7 +206,12 @@ impl BeaconState {
/// ///
/// Spec v0.2.0 /// Spec v0.2.0
pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch {
self.current_epoch(spec).saturating_sub(1_u64) let current_epoch = self.current_epoch(&spec);
if current_epoch == spec.genesis_epoch {
current_epoch
} else {
current_epoch - 1
}
} }
/// The epoch following `self.current_epoch()`. /// The epoch following `self.current_epoch()`.
@ -249,23 +259,50 @@ impl BeaconState {
/// committee is itself a list of validator indices. /// committee is itself a list of validator indices.
/// ///
/// Spec v0.1 /// Spec v0.1
pub fn get_shuffling(&self, seed: Hash256, epoch: Epoch, spec: &ChainSpec) -> Vec<Vec<usize>> { pub fn get_shuffling(
&self,
seed: Hash256,
epoch: Epoch,
spec: &ChainSpec,
) -> Option<Vec<Vec<usize>>> {
let active_validator_indices = let active_validator_indices =
get_active_validator_indices(&self.validator_registry, epoch); get_active_validator_indices(&self.validator_registry, epoch);
if active_validator_indices.is_empty() {
return None;
}
trace!(
"get_shuffling: active_validator_indices.len() == {}",
active_validator_indices.len()
);
let committees_per_epoch = let committees_per_epoch =
self.get_epoch_committee_count(active_validator_indices.len(), spec); self.get_epoch_committee_count(active_validator_indices.len(), spec);
// TODO: check that Hash256::from(u64) matches 'int_to_bytes32'. trace!(
let seed = seed ^ Hash256::from(epoch.as_u64()); "get_shuffling: active_validator_indices.len() == {}, committees_per_epoch: {}",
// TODO: fix `expect` assert. active_validator_indices.len(),
let shuffled_active_validator_indices = committees_per_epoch
shuffle(&seed, active_validator_indices).expect("Max validator count exceed!"); );
let mut shuffled_active_validator_indices = vec![0; active_validator_indices.len()];
for &i in &active_validator_indices {
let shuffled_i = get_permutated_index(
i,
active_validator_indices.len(),
&seed[..],
spec.shuffle_round_count,
)?;
shuffled_active_validator_indices[i] = active_validator_indices[shuffled_i]
}
Some(
shuffled_active_validator_indices shuffled_active_validator_indices
.honey_badger_split(committees_per_epoch as usize) .honey_badger_split(committees_per_epoch as usize)
.map(|slice: &[usize]| slice.to_vec()) .map(|slice: &[usize]| slice.to_vec())
.collect() .collect(),
)
} }
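get_shuffling now drives the committee shuffle through swap_or_not_shuffle's get_permutated_index rather than the removed fisher_yates_shuffle. A hedged sketch of building a complete permutation with it, assuming the four-argument call shape used above and a u8 round count; this only illustrates the primitive, not the exact indexing performed by the method.

use swap_or_not_shuffle::get_permutated_index;

// Illustration: permute the positions 0..list_size under a fixed seed. Returns
// None whenever the underlying routine does.
fn shuffle_positions(list_size: usize, seed: &[u8], rounds: u8) -> Option<Vec<usize>> {
    let mut shuffled = vec![0; list_size];
    for position in 0..list_size {
        // For a fixed seed the mapping is a bijection on 0..list_size, so each
        // output slot is written exactly once.
        let permuted = get_permutated_index(position, list_size, seed, rounds)?;
        shuffled[permuted] = position;
    }
    Some(shuffled)
}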
/// Return the number of committees in the previous epoch. /// Return the number of committees in the previous epoch.
@ -303,9 +340,17 @@ impl BeaconState {
+ 1; + 1;
let latest_index_root = current_epoch + spec.entry_exit_delay; let latest_index_root = current_epoch + spec.entry_exit_delay;
trace!(
"get_active_index_root: epoch: {}, earliest: {}, latest: {}",
epoch,
earliest_index_root,
latest_index_root
);
if (epoch >= earliest_index_root) & (epoch <= latest_index_root) { if (epoch >= earliest_index_root) & (epoch <= latest_index_root) {
Some(self.latest_index_roots[epoch.as_usize() % spec.latest_index_roots_length]) Some(self.latest_index_roots[epoch.as_usize() % spec.latest_index_roots_length])
} else { } else {
trace!("get_active_index_root: epoch out of range.");
None None
} }
} }
@ -350,29 +395,28 @@ impl BeaconState {
) -> Result<Vec<(Vec<usize>, u64)>, BeaconStateError> { ) -> Result<Vec<(Vec<usize>, u64)>, BeaconStateError> {
let epoch = slot.epoch(spec.epoch_length); let epoch = slot.epoch(spec.epoch_length);
let current_epoch = self.current_epoch(spec); let current_epoch = self.current_epoch(spec);
let previous_epoch = if current_epoch == spec.genesis_epoch { let previous_epoch = self.previous_epoch(spec);
current_epoch
} else {
current_epoch.saturating_sub(1_u64)
};
let next_epoch = self.next_epoch(spec); let next_epoch = self.next_epoch(spec);
let (committees_per_epoch, seed, shuffling_epoch, shuffling_start_shard) = let (committees_per_epoch, seed, shuffling_epoch, shuffling_start_shard) =
if epoch == previous_epoch { if epoch == current_epoch {
( trace!("get_crosslink_committees_at_slot: current_epoch");
self.get_previous_epoch_committee_count(spec),
self.previous_epoch_seed,
self.previous_calculation_epoch,
self.previous_epoch_start_shard,
)
} else if epoch == current_epoch {
( (
self.get_current_epoch_committee_count(spec), self.get_current_epoch_committee_count(spec),
self.current_epoch_seed, self.current_epoch_seed,
self.current_calculation_epoch, self.current_calculation_epoch,
self.current_epoch_start_shard, self.current_epoch_start_shard,
) )
} else if epoch == previous_epoch {
trace!("get_crosslink_committees_at_slot: previous_epoch");
(
self.get_previous_epoch_committee_count(spec),
self.previous_epoch_seed,
self.previous_calculation_epoch,
self.previous_epoch_start_shard,
)
} else if epoch == next_epoch { } else if epoch == next_epoch {
trace!("get_crosslink_committees_at_slot: next_epoch");
let current_committees_per_epoch = self.get_current_epoch_committee_count(spec); let current_committees_per_epoch = self.get_current_epoch_committee_count(spec);
let epochs_since_last_registry_update = let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch; current_epoch - self.validator_registry_update_epoch;
@ -401,12 +445,21 @@ impl BeaconState {
return Err(BeaconStateError::EpochOutOfBounds); return Err(BeaconStateError::EpochOutOfBounds);
}; };
let shuffling = self.get_shuffling(seed, shuffling_epoch, spec); let shuffling = self
.get_shuffling(seed, shuffling_epoch, spec)
.ok_or_else(|| BeaconStateError::UnableToShuffle)?;
let offset = slot.as_u64() % spec.epoch_length; let offset = slot.as_u64() % spec.epoch_length;
let committees_per_slot = committees_per_epoch / spec.epoch_length; let committees_per_slot = committees_per_epoch / spec.epoch_length;
let slot_start_shard = let slot_start_shard =
(shuffling_start_shard + committees_per_slot * offset) % spec.shard_count; (shuffling_start_shard + committees_per_slot * offset) % spec.shard_count;
trace!(
"get_crosslink_committees_at_slot: committees_per_slot: {}, slot_start_shard: {}, seed: {}",
committees_per_slot,
slot_start_shard,
seed
);
let mut crosslinks_at_slot = vec![]; let mut crosslinks_at_slot = vec![];
for i in 0..committees_per_slot { for i in 0..committees_per_slot {
let tuple = ( let tuple = (
@ -458,6 +511,11 @@ impl BeaconState {
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<usize, BeaconStateError> { ) -> Result<usize, BeaconStateError> {
let committees = self.get_crosslink_committees_at_slot(slot, false, spec)?; let committees = self.get_crosslink_committees_at_slot(slot, false, spec)?;
trace!(
"get_beacon_proposer_index: slot: {}, committees_count: {}",
slot,
committees.len()
);
committees committees
.first() .first()
.ok_or(BeaconStateError::InsufficientValidators) .ok_or(BeaconStateError::InsufficientValidators)
@ -910,125 +968,38 @@ impl From<AttestationParticipantsError> for InclusionError {
} }
} }
impl Encodable for BeaconState {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.genesis_time);
s.append(&self.fork);
s.append(&self.validator_registry);
s.append(&self.validator_balances);
s.append(&self.validator_registry_update_epoch);
s.append(&self.latest_randao_mixes);
s.append(&self.previous_epoch_start_shard);
s.append(&self.current_epoch_start_shard);
s.append(&self.previous_calculation_epoch);
s.append(&self.current_calculation_epoch);
s.append(&self.previous_epoch_seed);
s.append(&self.current_epoch_seed);
s.append(&self.previous_justified_epoch);
s.append(&self.justified_epoch);
s.append(&self.justification_bitfield);
s.append(&self.finalized_epoch);
s.append(&self.latest_crosslinks);
s.append(&self.latest_block_roots);
s.append(&self.latest_index_roots);
s.append(&self.latest_penalized_balances);
s.append(&self.latest_attestations);
s.append(&self.batched_block_roots);
s.append(&self.latest_eth1_data);
s.append(&self.eth1_data_votes);
}
}
impl Decodable for BeaconState {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (genesis_time, i) = <_>::ssz_decode(bytes, i)?;
let (fork, i) = <_>::ssz_decode(bytes, i)?;
let (validator_registry, i) = <_>::ssz_decode(bytes, i)?;
let (validator_balances, i) = <_>::ssz_decode(bytes, i)?;
let (validator_registry_update_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (latest_randao_mixes, i) = <_>::ssz_decode(bytes, i)?;
let (previous_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
let (current_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
let (previous_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (current_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (previous_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
let (current_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
let (previous_justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justification_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (finalized_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?;
let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_index_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_penalized_balances, i) = <_>::ssz_decode(bytes, i)?;
let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?;
let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?;
let (eth1_data_votes, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
slot,
genesis_time,
fork,
validator_registry,
validator_balances,
validator_registry_update_epoch,
latest_randao_mixes,
previous_epoch_start_shard,
current_epoch_start_shard,
previous_calculation_epoch,
current_calculation_epoch,
previous_epoch_seed,
current_epoch_seed,
previous_justified_epoch,
justified_epoch,
justification_bitfield,
finalized_epoch,
latest_crosslinks,
latest_block_roots,
latest_index_roots,
latest_penalized_balances,
latest_attestations,
batched_block_roots,
latest_eth1_data,
eth1_data_votes,
},
i,
))
}
}
impl TreeHash for BeaconState { impl TreeHash for BeaconState {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
result.append(&mut self.genesis_time.hash_tree_root()); result.append(&mut self.genesis_time.hash_tree_root_internal());
result.append(&mut self.fork.hash_tree_root()); result.append(&mut self.fork.hash_tree_root_internal());
result.append(&mut self.validator_registry.hash_tree_root()); result.append(&mut self.validator_registry.hash_tree_root_internal());
result.append(&mut self.validator_balances.hash_tree_root()); result.append(&mut self.validator_balances.hash_tree_root_internal());
result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); result.append(&mut self.validator_registry_update_epoch.hash_tree_root_internal());
result.append(&mut self.latest_randao_mixes.hash_tree_root()); result.append(&mut self.latest_randao_mixes.hash_tree_root_internal());
result.append(&mut self.previous_epoch_start_shard.hash_tree_root()); result.append(&mut self.previous_epoch_start_shard.hash_tree_root_internal());
result.append(&mut self.current_epoch_start_shard.hash_tree_root()); result.append(&mut self.current_epoch_start_shard.hash_tree_root_internal());
result.append(&mut self.previous_calculation_epoch.hash_tree_root()); result.append(&mut self.previous_calculation_epoch.hash_tree_root_internal());
result.append(&mut self.current_calculation_epoch.hash_tree_root()); result.append(&mut self.current_calculation_epoch.hash_tree_root_internal());
result.append(&mut self.previous_epoch_seed.hash_tree_root()); result.append(&mut self.previous_epoch_seed.hash_tree_root_internal());
result.append(&mut self.current_epoch_seed.hash_tree_root()); result.append(&mut self.current_epoch_seed.hash_tree_root_internal());
result.append(&mut self.previous_justified_epoch.hash_tree_root()); result.append(&mut self.previous_justified_epoch.hash_tree_root_internal());
result.append(&mut self.justified_epoch.hash_tree_root()); result.append(&mut self.justified_epoch.hash_tree_root_internal());
result.append(&mut self.justification_bitfield.hash_tree_root()); result.append(&mut self.justification_bitfield.hash_tree_root_internal());
result.append(&mut self.finalized_epoch.hash_tree_root()); result.append(&mut self.finalized_epoch.hash_tree_root_internal());
result.append(&mut self.latest_crosslinks.hash_tree_root()); result.append(&mut self.latest_crosslinks.hash_tree_root_internal());
result.append(&mut self.latest_block_roots.hash_tree_root()); result.append(&mut self.latest_block_roots.hash_tree_root_internal());
result.append(&mut self.latest_index_roots.hash_tree_root()); result.append(&mut self.latest_index_roots.hash_tree_root_internal());
result.append(&mut self.latest_penalized_balances.hash_tree_root()); result.append(&mut self.latest_penalized_balances.hash_tree_root_internal());
result.append(&mut self.latest_attestations.hash_tree_root()); result.append(&mut self.latest_attestations.hash_tree_root_internal());
result.append(&mut self.batched_block_roots.hash_tree_root()); result.append(&mut self.batched_block_roots.hash_tree_root_internal());
result.append(&mut self.latest_eth1_data.hash_tree_root()); result.append(&mut self.latest_eth1_data.hash_tree_root_internal());
result.append(&mut self.eth1_data_votes.hash_tree_root()); result.append(&mut self.eth1_data_votes.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
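The renamed `hash_tree_root_internal` above keeps the same pattern as before: append each field's root in declaration order, then hash the concatenation. A minimal sketch of that pattern, using only the `hash` function these files already import from the `ssz` crate (the `container_root` helper name is illustrative, not part of this diff):

    fn container_root(child_roots: &[Vec<u8>]) -> Vec<u8> {
        // Concatenate the children's roots in field order...
        let mut result: Vec<u8> = vec![];
        for root in child_roots {
            result.extend_from_slice(root);
        }
        // ...then hash the concatenation to obtain the container's root.
        hash(&result)
    }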
@ -1064,33 +1035,3 @@ impl<T: RngCore> TestRandom<T> for BeaconState {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let result = original.hash_tree_root();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -0,0 +1,97 @@
#![cfg(test)]
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use crate::{
beacon_state::BeaconStateError, BeaconState, ChainSpec, Deposit, DepositData, DepositInput,
Eth1Data, Hash256, Keypair,
};
use bls::create_proof_of_possession;
use ssz::{ssz_encode, Decodable};
struct BeaconStateTestBuilder {
pub genesis_time: u64,
pub initial_validator_deposits: Vec<Deposit>,
pub latest_eth1_data: Eth1Data,
pub spec: ChainSpec,
pub keypairs: Vec<Keypair>,
}
impl BeaconStateTestBuilder {
pub fn with_random_validators(validator_count: usize) -> Self {
let genesis_time = 10_000_000;
let keypairs: Vec<Keypair> = (0..validator_count)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not specified.
index: 0, // index verification is not specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: create_proof_of_possession(&keypair),
},
},
})
.collect();
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let spec = ChainSpec::foundation();
Self {
genesis_time,
initial_validator_deposits,
latest_eth1_data,
spec,
keypairs,
}
}
pub fn build(&self) -> Result<BeaconState, BeaconStateError> {
BeaconState::genesis(
self.genesis_time,
self.initial_validator_deposits.clone(),
self.latest_eth1_data.clone(),
&self.spec,
)
}
}
#[test]
pub fn can_produce_genesis_block() {
let builder = BeaconStateTestBuilder::with_random_validators(2);
builder.build().unwrap();
}
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}

View File

@ -1,72 +0,0 @@
#[cfg(test)]
mod tests {
use crate::{
beacon_state::BeaconStateError, BeaconState, ChainSpec, Deposit, DepositData, DepositInput,
Eth1Data, Hash256, Keypair,
};
use bls::create_proof_of_possession;
struct BeaconStateTestBuilder {
pub genesis_time: u64,
pub initial_validator_deposits: Vec<Deposit>,
pub latest_eth1_data: Eth1Data,
pub spec: ChainSpec,
pub keypairs: Vec<Keypair>,
}
impl BeaconStateTestBuilder {
pub fn with_random_validators(validator_count: usize) -> Self {
let genesis_time = 10_000_000;
let keypairs: Vec<Keypair> = (0..validator_count)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not specified.
index: 0, // index verification is not specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: create_proof_of_possession(&keypair),
},
},
})
.collect();
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let spec = ChainSpec::foundation();
Self {
genesis_time,
initial_validator_deposits,
latest_eth1_data,
spec,
keypairs,
}
}
pub fn build(&self) -> Result<BeaconState, BeaconStateError> {
BeaconState::genesis(
self.genesis_time,
self.initial_validator_deposits.clone(),
self.latest_eth1_data.clone(),
&self.spec,
)
}
}
#[test]
pub fn can_produce_genesis_block() {
let builder = BeaconStateTestBuilder::with_random_validators(2);
builder.build().unwrap();
}
}

View File

@ -2,41 +2,20 @@ use super::SlashableVoteData;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct CasperSlashing { pub struct CasperSlashing {
pub slashable_vote_data_1: SlashableVoteData, pub slashable_vote_data_1: SlashableVoteData,
pub slashable_vote_data_2: SlashableVoteData, pub slashable_vote_data_2: SlashableVoteData,
} }
impl Encodable for CasperSlashing {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slashable_vote_data_1);
s.append(&self.slashable_vote_data_2);
}
}
impl Decodable for CasperSlashing {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slashable_vote_data_1, i) = <_>::ssz_decode(bytes, i)?;
let (slashable_vote_data_2, i) = <_>::ssz_decode(bytes, i)?;
Ok((
CasperSlashing {
slashable_vote_data_1,
slashable_vote_data_2,
},
i,
))
}
}
impl TreeHash for CasperSlashing { impl TreeHash for CasperSlashing {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slashable_vote_data_1.hash_tree_root()); result.append(&mut self.slashable_vote_data_1.hash_tree_root_internal());
result.append(&mut self.slashable_vote_data_2.hash_tree_root()); result.append(&mut self.slashable_vote_data_2.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
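Throughout this diff the hand-written `Encodable`/`Decodable` impls are replaced by `#[derive(Encode, Decode)]` from the new `ssz_derive` crate. A minimal sketch of the round trip the derive provides, mirroring the `test_ssz_round_trip` tests in these files (`ExamplePair` is a made-up struct, and the sketch assumes the workspace's `ssz` and `ssz_derive` crates):

    use ssz::{ssz_encode, Decodable};
    use ssz_derive::{Decode, Encode};

    #[derive(Debug, PartialEq, Encode, Decode)]
    pub struct ExamplePair {
        pub epoch: u64,
        pub vote_count: u64,
    }

    fn round_trip() {
        let original = ExamplePair { epoch: 3, vote_count: 7 };
        // Encode with the derived `Encodable` impl...
        let bytes = ssz_encode(&original);
        // ...and decode with the derived `Decodable` impl, as the tests above do.
        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
        assert_eq!(original, decoded);
    }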
@ -54,7 +33,7 @@ impl<T: RngCore> TestRandom<T> for CasperSlashing {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -68,11 +47,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = CasperSlashing::random_for_test(&mut rng); let original = CasperSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,7 +1,96 @@
use crate::{Address, ChainSpec, Epoch, Hash256, Signature, Slot}; use crate::{Address, Epoch, Hash256, Slot};
use bls::Signature;
const GWEI: u64 = 1_000_000_000; const GWEI: u64 = 1_000_000_000;
/// Holds all the "constants" for a BeaconChain.
///
/// Spec v0.2.0
#[derive(PartialEq, Debug, Clone)]
pub struct ChainSpec {
/*
* Misc
*/
pub shard_count: u64,
pub target_committee_size: u64,
pub max_balance_churn_quotient: u64,
pub beacon_chain_shard_number: u64,
pub max_indices_per_slashable_vote: u64,
pub max_withdrawals_per_epoch: u64,
pub shuffle_round_count: u8,
/*
* Deposit contract
*/
pub deposit_contract_address: Address,
pub deposit_contract_tree_depth: u64,
/*
* Gwei values
*/
pub min_deposit_amount: u64,
pub max_deposit_amount: u64,
pub fork_choice_balance_increment: u64,
pub ejection_balance: u64,
/*
* Initial Values
*/
pub genesis_fork_version: u64,
pub genesis_slot: Slot,
pub genesis_epoch: Epoch,
pub genesis_start_shard: u64,
pub far_future_epoch: Epoch,
pub zero_hash: Hash256,
pub empty_signature: Signature,
pub bls_withdrawal_prefix_byte: u8,
/*
* Time parameters
*/
pub slot_duration: u64,
pub min_attestation_inclusion_delay: u64,
pub epoch_length: u64,
pub seed_lookahead: Epoch,
pub entry_exit_delay: u64,
pub eth1_data_voting_period: u64,
pub min_validator_withdrawal_epochs: Epoch,
/*
* State list lengths
*/
pub latest_block_roots_length: usize,
pub latest_randao_mixes_length: usize,
pub latest_index_roots_length: usize,
pub latest_penalized_exit_length: usize,
/*
* Reward and penalty quotients
*/
pub base_reward_quotient: u64,
pub whistleblower_reward_quotient: u64,
pub includer_reward_quotient: u64,
pub inactivity_penalty_quotient: u64,
/*
* Max operations per block
*/
pub max_proposer_slashings: u64,
pub max_attester_slashings: u64,
pub max_attestations: u64,
pub max_deposits: u64,
pub max_exits: u64,
/*
* Signature domains
*/
pub domain_deposit: u64,
pub domain_attestation: u64,
pub domain_proposal: u64,
pub domain_exit: u64,
pub domain_randao: u64,
}
impl ChainSpec { impl ChainSpec {
/// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation. /// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation.
/// ///
@ -100,6 +189,26 @@ impl ChainSpec {
} }
} }
impl ChainSpec {
/// Returns a `ChainSpec` compatible with the specification, suitable for 8 validators.
///
/// Spec v0.2.0
pub fn few_validators() -> Self {
let genesis_slot = Slot::new(2_u64.pow(19));
let epoch_length = 8;
let genesis_epoch = genesis_slot.epoch(epoch_length);
Self {
shard_count: 1,
target_committee_size: 1,
genesis_slot,
genesis_epoch,
epoch_length,
..ChainSpec::foundation()
}
}
}
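A hedged usage sketch for the new `few_validators()` spec: only the committee-sizing fields change, and everything else falls back to the Foundation values via the `..ChainSpec::foundation()` struct update (the test name below is illustrative, not from this diff):

    #[test]
    fn few_validators_overrides_only_sizing_fields() {
        let spec = ChainSpec::few_validators();
        // Overridden for small test networks.
        assert_eq!(spec.shard_count, 1);
        assert_eq!(spec.target_committee_size, 1);
        assert_eq!(spec.epoch_length, 8);
        // Everything else is inherited from the Foundation spec.
        let foundation = ChainSpec::foundation();
        assert_eq!(spec.max_deposit_amount, foundation.max_deposit_amount);
        assert_eq!(spec.min_attestation_inclusion_delay, foundation.min_attestation_inclusion_delay);
    }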
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -2,9 +2,10 @@ use crate::test_utils::TestRandom;
use crate::{Epoch, Hash256}; use crate::{Epoch, Hash256};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)]
pub struct Crosslink { pub struct Crosslink {
pub epoch: Epoch, pub epoch: Epoch,
pub shard_block_root: Hash256, pub shard_block_root: Hash256,
@ -20,33 +21,11 @@ impl Crosslink {
} }
} }
impl Encodable for Crosslink {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.epoch);
s.append(&self.shard_block_root);
}
}
impl Decodable for Crosslink {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (epoch, i) = <_>::ssz_decode(bytes, i)?;
let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
epoch,
shard_block_root,
},
i,
))
}
}
impl TreeHash for Crosslink { impl TreeHash for Crosslink {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.epoch.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root_internal());
result.append(&mut self.shard_block_root.hash_tree_root()); result.append(&mut self.shard_block_root.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -64,7 +43,7 @@ impl<T: RngCore> TestRandom<T> for Crosslink {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -78,11 +57,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Crosslink::random_for_test(&mut rng); let original = Crosslink::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,46 +2,22 @@ use super::{DepositData, Hash256};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct Deposit { pub struct Deposit {
pub branch: Vec<Hash256>, pub branch: Vec<Hash256>,
pub index: u64, pub index: u64,
pub deposit_data: DepositData, pub deposit_data: DepositData,
} }
impl Encodable for Deposit {
fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.branch);
s.append(&self.index);
s.append(&self.deposit_data);
}
}
impl Decodable for Deposit {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (branch, i) = <_>::ssz_decode(bytes, i)?;
let (index, i) = <_>::ssz_decode(bytes, i)?;
let (deposit_data, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
branch,
index,
deposit_data,
},
i,
))
}
}
impl TreeHash for Deposit { impl TreeHash for Deposit {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.branch.hash_tree_root()); result.append(&mut self.branch.hash_tree_root_internal());
result.append(&mut self.index.hash_tree_root()); result.append(&mut self.index.hash_tree_root_internal());
result.append(&mut self.deposit_data.hash_tree_root()); result.append(&mut self.deposit_data.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -60,7 +36,7 @@ impl<T: RngCore> TestRandom<T> for Deposit {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -74,11 +50,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Deposit::random_for_test(&mut rng); let original = Deposit::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,46 +2,22 @@ use super::DepositInput;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct DepositData { pub struct DepositData {
pub amount: u64, pub amount: u64,
pub timestamp: u64, pub timestamp: u64,
pub deposit_input: DepositInput, pub deposit_input: DepositInput,
} }
impl Encodable for DepositData {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.amount);
s.append(&self.timestamp);
s.append(&self.deposit_input);
}
}
impl Decodable for DepositData {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (amount, i) = <_>::ssz_decode(bytes, i)?;
let (timestamp, i) = <_>::ssz_decode(bytes, i)?;
let (deposit_input, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
amount,
timestamp,
deposit_input,
},
i,
))
}
}
impl TreeHash for DepositData { impl TreeHash for DepositData {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.amount.hash_tree_root()); result.append(&mut self.amount.hash_tree_root_internal());
result.append(&mut self.timestamp.hash_tree_root()); result.append(&mut self.timestamp.hash_tree_root_internal());
result.append(&mut self.deposit_input.hash_tree_root()); result.append(&mut self.deposit_input.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -60,7 +36,7 @@ impl<T: RngCore> TestRandom<T> for DepositData {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -74,11 +50,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositData::random_for_test(&mut rng); let original = DepositData::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -3,46 +3,22 @@ use crate::test_utils::TestRandom;
use bls::{PublicKey, Signature}; use bls::{PublicKey, Signature};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct DepositInput { pub struct DepositInput {
pub pubkey: PublicKey, pub pubkey: PublicKey,
pub withdrawal_credentials: Hash256, pub withdrawal_credentials: Hash256,
pub proof_of_possession: Signature, pub proof_of_possession: Signature,
} }
impl Encodable for DepositInput {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.pubkey);
s.append(&self.withdrawal_credentials);
s.append(&self.proof_of_possession);
}
}
impl Decodable for DepositInput {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (pubkey, i) = <_>::ssz_decode(bytes, i)?;
let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?;
let (proof_of_possession, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
pubkey,
withdrawal_credentials,
proof_of_possession,
},
i,
))
}
}
impl TreeHash for DepositInput { impl TreeHash for DepositInput {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.pubkey.hash_tree_root()); result.append(&mut self.pubkey.hash_tree_root_internal());
result.append(&mut self.withdrawal_credentials.hash_tree_root()); result.append(&mut self.withdrawal_credentials.hash_tree_root_internal());
result.append(&mut self.proof_of_possession.hash_tree_root()); result.append(&mut self.proof_of_possession.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -61,7 +37,7 @@ impl<T: RngCore> TestRandom<T> for DepositInput {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -75,11 +51,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositInput::random_for_test(&mut rng); let original = DepositInput::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,42 +2,21 @@ use super::Hash256;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
// Note: this is referred to as DepositRootVote in the spec // Note: this is referred to as DepositRootVote in the spec
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
pub struct Eth1Data { pub struct Eth1Data {
pub deposit_root: Hash256, pub deposit_root: Hash256,
pub block_hash: Hash256, pub block_hash: Hash256,
} }
impl Encodable for Eth1Data {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.deposit_root);
s.append(&self.block_hash);
}
}
impl Decodable for Eth1Data {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (deposit_root, i) = <_>::ssz_decode(bytes, i)?;
let (block_hash, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
deposit_root,
block_hash,
},
i,
))
}
}
impl TreeHash for Eth1Data { impl TreeHash for Eth1Data {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.deposit_root.hash_tree_root()); result.append(&mut self.deposit_root.hash_tree_root_internal());
result.append(&mut self.block_hash.hash_tree_root()); result.append(&mut self.block_hash.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -55,7 +34,7 @@ impl<T: RngCore> TestRandom<T> for Eth1Data {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -69,11 +48,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1Data::random_for_test(&mut rng); let original = Eth1Data::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,42 +2,21 @@ use super::Eth1Data;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
// Note: this is referred to as DepositRootVote in the spec // Note: this is referred to as DepositRootVote in the spec
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
pub struct Eth1DataVote { pub struct Eth1DataVote {
pub eth1_data: Eth1Data, pub eth1_data: Eth1Data,
pub vote_count: u64, pub vote_count: u64,
} }
impl Encodable for Eth1DataVote {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.eth1_data);
s.append(&self.vote_count);
}
}
impl Decodable for Eth1DataVote {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (eth1_data, i) = <_>::ssz_decode(bytes, i)?;
let (vote_count, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
eth1_data,
vote_count,
},
i,
))
}
}
impl TreeHash for Eth1DataVote { impl TreeHash for Eth1DataVote {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.eth1_data.hash_tree_root()); result.append(&mut self.eth1_data.hash_tree_root_internal());
result.append(&mut self.vote_count.hash_tree_root()); result.append(&mut self.vote_count.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -55,7 +34,7 @@ impl<T: RngCore> TestRandom<T> for Eth1DataVote {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -69,11 +48,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1DataVote::random_for_test(&mut rng); let original = Eth1DataVote::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,46 +2,22 @@ use crate::{test_utils::TestRandom, Epoch};
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct Exit { pub struct Exit {
pub epoch: Epoch, pub epoch: Epoch,
pub validator_index: u64, pub validator_index: u64,
pub signature: Signature, pub signature: Signature,
} }
impl Encodable for Exit {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.epoch);
s.append(&self.validator_index);
s.append(&self.signature);
}
}
impl Decodable for Exit {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (epoch, i) = <_>::ssz_decode(bytes, i)?;
let (validator_index, i) = <_>::ssz_decode(bytes, i)?;
let (signature, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
epoch,
validator_index,
signature,
},
i,
))
}
}
impl TreeHash for Exit { impl TreeHash for Exit {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.epoch.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root_internal());
result.append(&mut self.validator_index.hash_tree_root()); result.append(&mut self.validator_index.hash_tree_root_internal());
result.append(&mut self.signature.hash_tree_root()); result.append(&mut self.signature.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -60,7 +36,7 @@ impl<T: RngCore> TestRandom<T> for Exit {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -74,11 +50,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Exit::random_for_test(&mut rng); let original = Exit::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,46 +1,22 @@
use crate::{test_utils::TestRandom, Epoch}; use crate::{test_utils::TestRandom, Epoch};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, Clone, PartialEq, Default, Serialize)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)]
pub struct Fork { pub struct Fork {
pub previous_version: u64, pub previous_version: u64,
pub current_version: u64, pub current_version: u64,
pub epoch: Epoch, pub epoch: Epoch,
} }
impl Encodable for Fork {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.previous_version);
s.append(&self.current_version);
s.append(&self.epoch);
}
}
impl Decodable for Fork {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (previous_version, i) = <_>::ssz_decode(bytes, i)?;
let (current_version, i) = <_>::ssz_decode(bytes, i)?;
let (epoch, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
previous_version,
current_version,
epoch,
},
i,
))
}
}
impl TreeHash for Fork { impl TreeHash for Fork {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.previous_version.hash_tree_root()); result.append(&mut self.previous_version.hash_tree_root_internal());
result.append(&mut self.current_version.hash_tree_root()); result.append(&mut self.current_version.hash_tree_root_internal());
result.append(&mut self.epoch.hash_tree_root()); result.append(&mut self.epoch.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -59,7 +35,7 @@ impl<T: RngCore> TestRandom<T> for Fork {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -73,11 +49,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Fork::random_for_test(&mut rng); let original = Fork::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -7,8 +7,8 @@ pub mod attester_slashing;
pub mod beacon_block; pub mod beacon_block;
pub mod beacon_block_body; pub mod beacon_block_body;
pub mod beacon_state; pub mod beacon_state;
pub mod beacon_state_tests;
pub mod casper_slashing; pub mod casper_slashing;
pub mod chain_spec;
pub mod crosslink; pub mod crosslink;
pub mod deposit; pub mod deposit;
pub mod deposit_data; pub mod deposit_data;
@ -29,7 +29,6 @@ pub mod slashable_vote_data;
pub mod slot_epoch_macros; pub mod slot_epoch_macros;
pub mod slot_epoch; pub mod slot_epoch;
pub mod slot_height; pub mod slot_height;
pub mod spec;
pub mod validator; pub mod validator;
pub mod validator_registry; pub mod validator_registry;
pub mod validator_registry_delta_block; pub mod validator_registry_delta_block;
@ -45,6 +44,7 @@ pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_state::BeaconState; pub use crate::beacon_state::BeaconState;
pub use crate::casper_slashing::CasperSlashing; pub use crate::casper_slashing::CasperSlashing;
pub use crate::chain_spec::ChainSpec;
pub use crate::crosslink::Crosslink; pub use crate::crosslink::Crosslink;
pub use crate::deposit::Deposit; pub use crate::deposit::Deposit;
pub use crate::deposit_data::DepositData; pub use crate::deposit_data::DepositData;
@ -61,7 +61,6 @@ pub use crate::slashable_attestation::SlashableAttestation;
pub use crate::slashable_vote_data::SlashableVoteData; pub use crate::slashable_vote_data::SlashableVoteData;
pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_epoch::{Epoch, Slot};
pub use crate::slot_height::SlotHeight; pub use crate::slot_height::SlotHeight;
pub use crate::spec::ChainSpec;
pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator}; pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator};
pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock; pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock;

View File

@ -2,9 +2,10 @@ use crate::test_utils::TestRandom;
use crate::{AttestationData, Bitfield, Slot}; use crate::{AttestationData, Bitfield, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, Clone, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)]
pub struct PendingAttestation { pub struct PendingAttestation {
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData, pub data: AttestationData,
@ -12,41 +13,13 @@ pub struct PendingAttestation {
pub inclusion_slot: Slot, pub inclusion_slot: Slot,
} }
impl Encodable for PendingAttestation {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.aggregation_bitfield);
s.append(&self.data);
s.append(&self.custody_bitfield);
s.append(&self.inclusion_slot);
}
}
impl Decodable for PendingAttestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (aggregation_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (inclusion_slot, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
data,
aggregation_bitfield,
custody_bitfield,
inclusion_slot,
},
i,
))
}
}
impl TreeHash for PendingAttestation { impl TreeHash for PendingAttestation {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.aggregation_bitfield.hash_tree_root()); result.append(&mut self.aggregation_bitfield.hash_tree_root_internal());
result.append(&mut self.data.hash_tree_root()); result.append(&mut self.data.hash_tree_root_internal());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.custody_bitfield.hash_tree_root_internal());
result.append(&mut self.inclusion_slot.hash_tree_root()); result.append(&mut self.inclusion_slot.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -66,7 +39,7 @@ impl<T: RngCore> TestRandom<T> for PendingAttestation {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -80,11 +53,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = PendingAttestation::random_for_test(&mut rng); let original = PendingAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,46 +2,22 @@ use crate::test_utils::TestRandom;
use crate::{Hash256, Slot}; use crate::{Hash256, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Default, Serialize)] #[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
pub struct ProposalSignedData { pub struct ProposalSignedData {
pub slot: Slot, pub slot: Slot,
pub shard: u64, pub shard: u64,
pub block_root: Hash256, pub block_root: Hash256,
} }
impl Encodable for ProposalSignedData {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.shard);
s.append(&self.block_root);
}
}
impl Decodable for ProposalSignedData {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (shard, i) = <_>::ssz_decode(bytes, i)?;
let (block_root, i) = <_>::ssz_decode(bytes, i)?;
Ok((
ProposalSignedData {
slot,
shard,
block_root,
},
i,
))
}
}
impl TreeHash for ProposalSignedData { impl TreeHash for ProposalSignedData {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
result.append(&mut self.shard.hash_tree_root()); result.append(&mut self.shard.hash_tree_root_internal());
result.append(&mut self.block_root.hash_tree_root()); result.append(&mut self.block_root.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -60,7 +36,7 @@ impl<T: RngCore> TestRandom<T> for ProposalSignedData {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -74,11 +50,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposalSignedData::random_for_test(&mut rng); let original = ProposalSignedData::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -3,9 +3,10 @@ use crate::test_utils::TestRandom;
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct ProposerSlashing { pub struct ProposerSlashing {
pub proposer_index: u64, pub proposer_index: u64,
pub proposal_data_1: ProposalSignedData, pub proposal_data_1: ProposalSignedData,
@ -14,45 +15,14 @@ pub struct ProposerSlashing {
pub proposal_signature_2: Signature, pub proposal_signature_2: Signature,
} }
impl Encodable for ProposerSlashing {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.proposer_index);
s.append(&self.proposal_data_1);
s.append(&self.proposal_signature_1);
s.append(&self.proposal_data_2);
s.append(&self.proposal_signature_2);
}
}
impl Decodable for ProposerSlashing {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (proposer_index, i) = <_>::ssz_decode(bytes, i)?;
let (proposal_data_1, i) = <_>::ssz_decode(bytes, i)?;
let (proposal_signature_1, i) = <_>::ssz_decode(bytes, i)?;
let (proposal_data_2, i) = <_>::ssz_decode(bytes, i)?;
let (proposal_signature_2, i) = <_>::ssz_decode(bytes, i)?;
Ok((
ProposerSlashing {
proposer_index,
proposal_data_1,
proposal_signature_1,
proposal_data_2,
proposal_signature_2,
},
i,
))
}
}
impl TreeHash for ProposerSlashing { impl TreeHash for ProposerSlashing {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.proposer_index.hash_tree_root()); result.append(&mut self.proposer_index.hash_tree_root_internal());
result.append(&mut self.proposal_data_1.hash_tree_root()); result.append(&mut self.proposal_data_1.hash_tree_root_internal());
result.append(&mut self.proposal_signature_1.hash_tree_root()); result.append(&mut self.proposal_signature_1.hash_tree_root_internal());
result.append(&mut self.proposal_data_2.hash_tree_root()); result.append(&mut self.proposal_data_2.hash_tree_root_internal());
result.append(&mut self.proposal_signature_2.hash_tree_root()); result.append(&mut self.proposal_signature_2.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -73,7 +43,7 @@ impl<T: RngCore> TestRandom<T> for ProposerSlashing {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -87,11 +57,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposerSlashing::random_for_test(&mut rng); let original = ProposerSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,46 +1,22 @@
use crate::{test_utils::TestRandom, Slot}; use crate::{test_utils::TestRandom, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct ShardReassignmentRecord { pub struct ShardReassignmentRecord {
pub validator_index: u64, pub validator_index: u64,
pub shard: u64, pub shard: u64,
pub slot: Slot, pub slot: Slot,
} }
impl Encodable for ShardReassignmentRecord {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.validator_index);
s.append(&self.shard);
s.append(&self.slot);
}
}
impl Decodable for ShardReassignmentRecord {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (validator_index, i) = <_>::ssz_decode(bytes, i)?;
let (shard, i) = <_>::ssz_decode(bytes, i)?;
let (slot, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
validator_index,
shard,
slot,
},
i,
))
}
}
impl TreeHash for ShardReassignmentRecord { impl TreeHash for ShardReassignmentRecord {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.validator_index.hash_tree_root()); result.append(&mut self.validator_index.hash_tree_root_internal());
result.append(&mut self.shard.hash_tree_root()); result.append(&mut self.shard.hash_tree_root_internal());
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -59,7 +35,7 @@ impl<T: RngCore> TestRandom<T> for ShardReassignmentRecord {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -73,11 +49,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardReassignmentRecord::random_for_test(&mut rng); let original = ShardReassignmentRecord::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,9 +1,10 @@
use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield}; use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct SlashableAttestation { pub struct SlashableAttestation {
pub validator_indices: Vec<u64>, pub validator_indices: Vec<u64>,
pub data: AttestationData, pub data: AttestationData,
@ -11,41 +12,13 @@ pub struct SlashableAttestation {
pub aggregate_signature: AggregateSignature, pub aggregate_signature: AggregateSignature,
} }
impl Encodable for SlashableAttestation {
fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.validator_indices);
s.append(&self.data);
s.append(&self.custody_bitfield);
s.append(&self.aggregate_signature);
}
}
impl Decodable for SlashableAttestation {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (validator_indices, i) = <_>::ssz_decode(bytes, i)?;
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (aggregate_signature, i) = <_>::ssz_decode(bytes, i)?;
Ok((
SlashableAttestation {
validator_indices,
data,
custody_bitfield,
aggregate_signature,
},
i,
))
}
}
impl TreeHash for SlashableAttestation { impl TreeHash for SlashableAttestation {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.validator_indices.hash_tree_root()); result.append(&mut self.validator_indices.hash_tree_root_internal());
result.append(&mut self.data.hash_tree_root()); result.append(&mut self.data.hash_tree_root_internal());
result.append(&mut self.custody_bitfield.hash_tree_root()); result.append(&mut self.custody_bitfield.hash_tree_root_internal());
result.append(&mut self.aggregate_signature.hash_tree_root()); result.append(&mut self.aggregate_signature.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -65,7 +38,7 @@ impl<T: RngCore> TestRandom<T> for SlashableAttestation {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -79,11 +52,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng); let original = SlashableAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,11 +1,13 @@
use super::AttestationData; use super::AttestationData;
use crate::chain_spec::ChainSpec;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use bls::AggregateSignature; use bls::AggregateSignature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
pub struct SlashableVoteData { pub struct SlashableVoteData {
pub custody_bit_0_indices: Vec<u32>, pub custody_bit_0_indices: Vec<u32>,
pub custody_bit_1_indices: Vec<u32>, pub custody_bit_1_indices: Vec<u32>,
@ -13,41 +15,34 @@ pub struct SlashableVoteData {
pub aggregate_signature: AggregateSignature, pub aggregate_signature: AggregateSignature,
} }
impl Encodable for SlashableVoteData {
fn ssz_append(&self, s: &mut SszStream) {
s.append_vec(&self.custody_bit_0_indices);
s.append_vec(&self.custody_bit_1_indices);
s.append(&self.data);
s.append(&self.aggregate_signature);
}
}
impl Decodable for SlashableVoteData {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (custody_bit_0_indices, i) = <_>::ssz_decode(bytes, i)?;
let (custody_bit_1_indices, i) = <_>::ssz_decode(bytes, i)?;
let (data, i) = <_>::ssz_decode(bytes, i)?;
let (aggregate_signature, i) = <_>::ssz_decode(bytes, i)?;
Ok((
SlashableVoteData {
custody_bit_0_indices,
custody_bit_1_indices,
data,
aggregate_signature,
},
i,
))
}
}
impl SlashableVoteData {
/// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
///
/// Spec v0.3.0
pub fn is_double_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
self.data.slot.epoch(spec.epoch_length) == other.data.slot.epoch(spec.epoch_length)
}
/// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
///
/// Spec v0.3.0
pub fn is_surround_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
let source_epoch_1 = self.data.justified_epoch;
let source_epoch_2 = other.data.justified_epoch;
let target_epoch_1 = self.data.slot.epoch(spec.epoch_length);
let target_epoch_2 = other.data.slot.epoch(spec.epoch_length);
(source_epoch_1 < source_epoch_2) && (target_epoch_2 < target_epoch_1)
}
}
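A small worked sketch (not part of the diff) restating the two rules just added on bare epoch numbers: votes with the same target epoch are a double vote, and vote 1 surrounds vote 2 when it has an earlier source and a later target. The helper names are illustrative only:

    // Source/target epochs only; mirrors the comparisons in is_double_vote / is_surround_vote.
    fn is_double(target_1: u64, target_2: u64) -> bool {
        target_1 == target_2
    }

    fn is_surround(source_1: u64, target_1: u64, source_2: u64, target_2: u64) -> bool {
        (source_1 < source_2) && (target_2 < target_1)
    }

    fn examples() {
        assert!(is_double(4, 4));          // same target epoch: double vote
        assert!(is_surround(1, 4, 2, 3));  // source 1 < 2 and target 3 < 4: surround
        assert!(!is_surround(2, 3, 1, 4)); // the inner vote does not surround the outer one
        assert!(!is_surround(1, 4, 1, 4)); // identical votes are a double vote, not a surround
    }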
impl TreeHash for SlashableVoteData { impl TreeHash for SlashableVoteData {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.custody_bit_0_indices.hash_tree_root()); result.append(&mut self.custody_bit_0_indices.hash_tree_root_internal());
result.append(&mut self.custody_bit_1_indices.hash_tree_root()); result.append(&mut self.custody_bit_1_indices.hash_tree_root_internal());
result.append(&mut self.data.hash_tree_root()); result.append(&mut self.data.hash_tree_root_internal());
result.append(&mut self.aggregate_signature.hash_tree_root()); result.append(&mut self.aggregate_signature.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -66,8 +61,82 @@ impl<T: RngCore> TestRandom<T> for SlashableVoteData {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::chain_spec::ChainSpec;
use crate::slot_epoch::{Epoch, Slot};
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test]
pub fn test_is_double_vote_true() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
assert_eq!(
slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
true
)
}
#[test]
pub fn test_is_double_vote_false() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(2, 1, &spec);
assert_eq!(
slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
false
);
}
#[test]
pub fn test_is_surround_vote_true() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(2, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
true
);
}
#[test]
pub fn test_is_surround_vote_true_realistic() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(4, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(3, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
true
);
}
#[test]
pub fn test_is_surround_vote_false_source_epoch_fails() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(2, 2, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
false
);
}
#[test]
pub fn test_is_surround_vote_false_target_epoch_fails() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(2, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
false
);
}
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -81,14 +150,27 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableVoteData::random_for_test(&mut rng); let original = SlashableVoteData::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170 // https://github.com/sigp/lighthouse/issues/170
} }
fn create_slashable_vote_data(
slot_factor: u64,
justified_epoch: u64,
spec: &ChainSpec,
) -> SlashableVoteData {
let mut rng = XorShiftRng::from_seed([42; 16]);
let mut slashable_vote = SlashableVoteData::random_for_test(&mut rng);
slashable_vote.data.slot = Slot::new(slot_factor * spec.epoch_length);
slashable_vote.data.justified_epoch = Epoch::new(justified_epoch);
slashable_vote
}
} }
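
The slashing predicates above reduce to simple epoch comparisons: a double vote targets the same epoch twice, while a surround vote has an earlier source epoch and a later target epoch than the vote it wraps. A minimal standalone sketch of the same arithmetic (the `VoteEpochs` type and helper functions below are illustrative stand-ins, not the crate's API):

```
/// Illustrative stand-in for the (source, target) epoch pair carried by a vote;
/// not one of the crate's types.
struct VoteEpochs {
    source: u64, // justified (source) epoch
    target: u64, // epoch containing the attestation slot
}

/// Double vote: two distinct votes for the same target epoch.
fn is_double(a: &VoteEpochs, b: &VoteEpochs) -> bool {
    a.target == b.target
}

/// Surround vote: `a` surrounds `b` when its source is earlier AND its target is later.
fn is_surround(a: &VoteEpochs, b: &VoteEpochs) -> bool {
    a.source < b.source && b.target < a.target
}

fn main() {
    // Mirrors `test_is_surround_vote_true_realistic`: (source 1, target 4)
    // surrounds (source 2, target 3).
    let outer = VoteEpochs { source: 1, target: 4 };
    let inner = VoteEpochs { source: 2, target: 3 };
    assert!(is_surround(&outer, &inner));
    assert!(!is_surround(&inner, &outer));
    assert!(!is_double(&outer, &inner));
}
```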

View File

@ -25,12 +25,14 @@ macro_rules! impl_into_u32 {
($main: ident) => { ($main: ident) => {
impl Into<u32> for $main { impl Into<u32> for $main {
fn into(self) -> u32 { fn into(self) -> u32 {
assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32");
self.0 as u32 self.0 as u32
} }
} }
impl $main { impl $main {
pub fn as_u32(&self) -> u32 { pub fn as_u32(&self) -> u32 {
assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32");
self.0 as u32 self.0 as u32
} }
} }
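
The assertions added here turn a silently truncating `u64`-to-`u32` cast into a loud failure. A hedged sketch of the same guard on a standalone newtype (`MyIndex` is illustrative, not one of the crate's wrappers):

```
/// Illustrative u64 newtype, standing in for the wrappers this macro targets.
struct MyIndex(u64);

impl MyIndex {
    /// Panics instead of silently truncating when the value does not fit in a u32.
    pub fn as_u32(&self) -> u32 {
        assert!(self.0 < u64::from(std::u32::MAX), "Lossy conversion to u32");
        self.0 as u32
    }
}

fn main() {
    assert_eq!(MyIndex(42).as_u32(), 42);
    // MyIndex(u64::max_value()).as_u32(); // would panic with "Lossy conversion to u32"
}
```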
@ -224,9 +226,9 @@ macro_rules! impl_ssz {
} }
impl TreeHash for $type { impl TreeHash for $type {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.0.hash_tree_root()); result.append(&mut self.0.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -560,11 +562,11 @@ macro_rules! ssz_tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng); let original = $type::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,92 +0,0 @@
mod foundation;
use crate::{Address, Epoch, Hash256, Slot};
use bls::Signature;
/// Holds all the "constants" for a BeaconChain.
///
/// Spec v0.2.0
#[derive(PartialEq, Debug, Clone)]
pub struct ChainSpec {
/*
* Misc
*/
pub shard_count: u64,
pub target_committee_size: u64,
pub max_balance_churn_quotient: u64,
pub beacon_chain_shard_number: u64,
pub max_indices_per_slashable_vote: u64,
pub max_withdrawals_per_epoch: u64,
pub shuffle_round_count: u64,
/*
* Deposit contract
*/
pub deposit_contract_address: Address,
pub deposit_contract_tree_depth: u64,
/*
* Gwei values
*/
pub min_deposit_amount: u64,
pub max_deposit_amount: u64,
pub fork_choice_balance_increment: u64,
pub ejection_balance: u64,
/*
* Initial Values
*/
pub genesis_fork_version: u64,
pub genesis_slot: Slot,
pub genesis_epoch: Epoch,
pub genesis_start_shard: u64,
pub far_future_epoch: Epoch,
pub zero_hash: Hash256,
pub empty_signature: Signature,
pub bls_withdrawal_prefix_byte: u8,
/*
* Time parameters
*/
pub slot_duration: u64,
pub min_attestation_inclusion_delay: u64,
pub epoch_length: u64,
pub seed_lookahead: Epoch,
pub entry_exit_delay: u64,
pub eth1_data_voting_period: u64,
pub min_validator_withdrawal_epochs: Epoch,
/*
* State list lengths
*/
pub latest_block_roots_length: usize,
pub latest_randao_mixes_length: usize,
pub latest_index_roots_length: usize,
pub latest_penalized_exit_length: usize,
/*
* Reward and penalty quotients
*/
pub base_reward_quotient: u64,
pub whistleblower_reward_quotient: u64,
pub includer_reward_quotient: u64,
pub inactivity_penalty_quotient: u64,
/*
* Max operations per block
*/
pub max_proposer_slashings: u64,
pub max_attester_slashings: u64,
pub max_attestations: u64,
pub max_deposits: u64,
pub max_exits: u64,
/*
* Signature domains
*/
pub domain_deposit: u64,
pub domain_attestation: u64,
pub domain_proposal: u64,
pub domain_exit: u64,
pub domain_randao: u64,
}

View File

@ -122,15 +122,17 @@ impl Decodable for Validator {
} }
impl TreeHash for Validator { impl TreeHash for Validator {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.pubkey.hash_tree_root()); result.append(&mut self.pubkey.hash_tree_root_internal());
result.append(&mut self.withdrawal_credentials.hash_tree_root()); result.append(&mut self.withdrawal_credentials.hash_tree_root_internal());
result.append(&mut self.activation_epoch.hash_tree_root()); result.append(&mut self.activation_epoch.hash_tree_root_internal());
result.append(&mut self.exit_epoch.hash_tree_root()); result.append(&mut self.exit_epoch.hash_tree_root_internal());
result.append(&mut self.withdrawal_epoch.hash_tree_root()); result.append(&mut self.withdrawal_epoch.hash_tree_root_internal());
result.append(&mut self.penalized_epoch.hash_tree_root()); result.append(&mut self.penalized_epoch.hash_tree_root_internal());
result.append(&mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root()); result.append(
&mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root_internal(),
);
hash(&result) hash(&result)
} }
} }
@ -190,11 +192,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Validator::random_for_test(&mut rng); let original = Validator::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -2,10 +2,11 @@ use crate::{test_utils::TestRandom, Hash256, Slot};
use bls::PublicKey; use bls::PublicKey;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::Serialize;
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
// The information gathered from the PoW chain validator registration function. // The information gathered from the PoW chain validator registration function.
#[derive(Debug, Clone, PartialEq, Serialize)] #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)]
pub struct ValidatorRegistryDeltaBlock { pub struct ValidatorRegistryDeltaBlock {
pub latest_registry_delta_root: Hash256, pub latest_registry_delta_root: Hash256,
pub validator_index: u32, pub validator_index: u32,
@ -27,45 +28,14 @@ impl Default for ValidatorRegistryDeltaBlock {
} }
} }
impl Encodable for ValidatorRegistryDeltaBlock {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.latest_registry_delta_root);
s.append(&self.validator_index);
s.append(&self.pubkey);
s.append(&self.slot);
s.append(&self.flag);
}
}
impl Decodable for ValidatorRegistryDeltaBlock {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (latest_registry_delta_root, i) = <_>::ssz_decode(bytes, i)?;
let (validator_index, i) = <_>::ssz_decode(bytes, i)?;
let (pubkey, i) = <_>::ssz_decode(bytes, i)?;
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (flag, i) = <_>::ssz_decode(bytes, i)?;
Ok((
Self {
latest_registry_delta_root,
validator_index,
pubkey,
slot,
flag,
},
i,
))
}
}
impl TreeHash for ValidatorRegistryDeltaBlock { impl TreeHash for ValidatorRegistryDeltaBlock {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.latest_registry_delta_root.hash_tree_root()); result.append(&mut self.latest_registry_delta_root.hash_tree_root_internal());
result.append(&mut self.validator_index.hash_tree_root()); result.append(&mut self.validator_index.hash_tree_root_internal());
result.append(&mut self.pubkey.hash_tree_root()); result.append(&mut self.pubkey.hash_tree_root_internal());
result.append(&mut self.slot.hash_tree_root()); result.append(&mut self.slot.hash_tree_root_internal());
result.append(&mut self.flag.hash_tree_root()); result.append(&mut self.flag.hash_tree_root_internal());
hash(&result) hash(&result)
} }
} }
@ -86,7 +56,7 @@ impl<T: RngCore> TestRandom<T> for ValidatorRegistryDeltaBlock {
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode; use ssz::{ssz_encode, Decodable};
#[test] #[test]
pub fn test_ssz_round_trip() { pub fn test_ssz_round_trip() {
@ -100,11 +70,11 @@ mod tests {
} }
#[test] #[test]
pub fn test_hash_tree_root() { pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng); let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng);
let result = original.hash_tree_root(); let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -57,7 +57,7 @@ impl Serialize for AggregateSignature {
} }
impl TreeHash for AggregateSignature { impl TreeHash for AggregateSignature {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
hash(&self.0.as_bytes()) hash(&self.0.as_bytes())
} }
} }

View File

@ -66,7 +66,7 @@ impl Serialize for PublicKey {
} }
impl TreeHash for PublicKey { impl TreeHash for PublicKey {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
hash(&self.0.as_bytes()) hash(&self.0.as_bytes())
} }
} }

View File

@ -41,7 +41,7 @@ impl Decodable for SecretKey {
} }
impl TreeHash for SecretKey { impl TreeHash for SecretKey {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
self.0.as_bytes().clone() self.0.as_bytes().clone()
} }
} }

View File

@ -61,7 +61,7 @@ impl Decodable for Signature {
} }
impl TreeHash for Signature { impl TreeHash for Signature {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
hash(&self.0.as_bytes()) hash(&self.0.as_bytes())
} }
} }

View File

@ -187,8 +187,8 @@ impl Serialize for BooleanBitfield {
} }
impl ssz::TreeHash for BooleanBitfield { impl ssz::TreeHash for BooleanBitfield {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
self.to_bytes().hash_tree_root() self.to_bytes().hash_tree_root_internal()
} }
} }

View File

@ -39,6 +39,21 @@ impl Decodable for u8 {
} }
} }
impl Decodable for bool {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
if index >= bytes.len() {
Err(DecodeError::TooShort)
} else {
let result = match bytes[index] {
0b0000_0000 => false,
0b1000_0000 => true,
_ => return Err(DecodeError::Invalid),
};
Ok((result, index + 1))
}
}
}
impl Decodable for H256 { impl Decodable for H256 {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
if bytes.len() < 32 || bytes.len() - 32 < index { if bytes.len() < 32 || bytes.len() - 32 < index {
@ -215,4 +230,20 @@ mod tests {
let result: u16 = decode_ssz(&vec![0, 0, 0, 0, 1], 3).unwrap().0; let result: u16 = decode_ssz(&vec![0, 0, 0, 0, 1], 3).unwrap().0;
assert_eq!(result, 1); assert_eq!(result, 1);
} }
#[test]
fn test_decode_ssz_bool() {
let ssz = vec![0b0000_0000, 0b1000_0000];
let (result, index): (bool, usize) = decode_ssz(&ssz, 0).unwrap();
assert_eq!(index, 1);
assert_eq!(result, false);
let (result, index): (bool, usize) = decode_ssz(&ssz, 1).unwrap();
assert_eq!(index, 2);
assert_eq!(result, true);
let ssz = vec![0b0100_0000];
let result: Result<(bool, usize), DecodeError> = decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::Invalid));
}
} }

View File

@ -46,6 +46,13 @@ impl_encodable_for_uint!(u32, 32);
impl_encodable_for_uint!(u64, 64); impl_encodable_for_uint!(u64, 64);
impl_encodable_for_uint!(usize, 64); impl_encodable_for_uint!(usize, 64);
impl Encodable for bool {
fn ssz_append(&self, s: &mut SszStream) {
let byte = if *self { 0b1000_0000 } else { 0b0000_0000 };
s.append_encoded_raw(&[byte]);
}
}
impl Encodable for H256 { impl Encodable for H256 {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append_encoded_raw(&self.to_vec()); s.append_encoded_raw(&self.to_vec());
@ -206,4 +213,17 @@ mod tests {
ssz.append(&x); ssz.append(&x);
assert_eq!(ssz.drain(), vec![255, 255, 255, 255, 255, 255, 255, 255]); assert_eq!(ssz.drain(), vec![255, 255, 255, 255, 255, 255, 255, 255]);
} }
#[test]
fn test_ssz_encode_bool() {
let x: bool = false;
let mut ssz = SszStream::new();
ssz.append(&x);
assert_eq!(ssz.drain(), vec![0b0000_0000]);
let x: bool = true;
let mut ssz = SszStream::new();
ssz.append(&x);
assert_eq!(ssz.drain(), vec![0b1000_0000]);
}
} }
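
The new `bool` codec maps `true` to the single byte `0b1000_0000`, `false` to `0b0000_0000`, and rejects every other byte on decode. A small round-trip sketch, assuming `ssz_encode`, `Decodable`, and `DecodeError` are re-exported at the crate root in the way the in-tree tests use them:

```
use ssz::{ssz_encode, Decodable, DecodeError};

fn main() {
    // Encoding: exactly one byte per bool, high bit set for `true`.
    assert_eq!(ssz_encode(&true), vec![0b1000_0000]);
    assert_eq!(ssz_encode(&false), vec![0b0000_0000]);

    // Decoding via the `Decodable` trait: the returned index advances by one byte.
    let (value, index) = bool::ssz_decode(&[0b1000_0000], 0).unwrap();
    assert!(value);
    assert_eq!(index, 1);

    // Any other bit pattern is rejected rather than coerced.
    assert_eq!(bool::ssz_decode(&[0b0100_0000], 0), Err(DecodeError::Invalid));
}
```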

View File

@ -3,49 +3,49 @@ use super::{merkle_hash, ssz_encode, TreeHash};
use hashing::hash; use hashing::hash;
impl TreeHash for u8 { impl TreeHash for u8 {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for u16 { impl TreeHash for u16 {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for u32 { impl TreeHash for u32 {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for u64 { impl TreeHash for u64 {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for usize { impl TreeHash for usize {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for Address { impl TreeHash for Address {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for H256 { impl TreeHash for H256 {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
ssz_encode(self) ssz_encode(self)
} }
} }
impl TreeHash for [u8] { impl TreeHash for [u8] {
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
if self.len() > 32 { if self.len() > 32 {
return hash(&self); return hash(&self);
} }
@ -57,12 +57,12 @@ impl<T> TreeHash for Vec<T>
where where
T: TreeHash, T: TreeHash,
{ {
/// Returns the merkle_hash of a list of hash_tree_root values created /// Returns the merkle_hash of a list of hash_tree_root_internal values created
/// from the given list. /// from the given list.
/// Note: A byte vector, Vec<u8>, must be converted to a slice (as_slice()) /// Note: A byte vector, Vec<u8>, must be converted to a slice (as_slice())
/// to be handled properly (i.e. hashed) as byte array. /// to be handled properly (i.e. hashed) as byte array.
fn hash_tree_root(&self) -> Vec<u8> { fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut tree_hashes = self.iter().map(|x| x.hash_tree_root()).collect(); let mut tree_hashes = self.iter().map(|x| x.hash_tree_root_internal()).collect();
merkle_hash(&mut tree_hashes) merkle_hash(&mut tree_hashes)
} }
} }
@ -73,7 +73,7 @@ mod tests {
#[test] #[test]
fn test_impl_tree_hash_vec() { fn test_impl_tree_hash_vec() {
let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root(); let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root_internal();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
} }
} }

View File

@ -4,7 +4,14 @@ const SSZ_CHUNK_SIZE: usize = 128;
const HASHSIZE: usize = 32; const HASHSIZE: usize = 32;
pub trait TreeHash { pub trait TreeHash {
fn hash_tree_root(&self) -> Vec<u8>; fn hash_tree_root_internal(&self) -> Vec<u8>;
fn hash_tree_root(&self) -> Vec<u8> {
let mut result = self.hash_tree_root_internal();
if result.len() < HASHSIZE {
zpad(&mut result, HASHSIZE);
}
result
}
} }
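
The trait now splits the unpadded internal root from the public `hash_tree_root`, whose provided implementation right-pads any result shorter than 32 bytes. A minimal sketch of how a custom type plugs into this (the `Nibble` type below is purely illustrative):

```
use ssz::TreeHash;

/// Illustrative single-byte wrapper, not a real consensus type.
struct Nibble(u8);

impl TreeHash for Nibble {
    // Only the internal method needs implementing; the provided
    // `hash_tree_root` wrapper zero-pads the result up to 32 bytes.
    fn hash_tree_root_internal(&self) -> Vec<u8> {
        vec![self.0]
    }
}

fn main() {
    let root = Nibble(0xff).hash_tree_root();
    assert_eq!(root.len(), 32);                  // padded up to HASHSIZE
    assert_eq!(root[0], 0xff);                   // original byte is left-aligned
    assert!(root[1..].iter().all(|b| *b == 0));  // remainder is zero padding
}
```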
/// Returns a 32 byte hash of 'list' - a vector of byte vectors. /// Returns a 32 byte hash of 'list' - a vector of byte vectors.
@ -14,7 +21,8 @@ pub fn merkle_hash(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
let (mut chunk_size, mut chunkz) = list_to_blob(list); let (mut chunk_size, mut chunkz) = list_to_blob(list);
// get data_len as bytes. It will be hashed together with the merkle root // get data_len as bytes. It will be hashed together with the merkle root
let datalen = list.len().to_le_bytes(); let mut datalen = list.len().to_le_bytes().to_vec();
zpad(&mut datalen, 32);
// Tree-hash // Tree-hash
while chunkz.len() > HASHSIZE { while chunkz.len() > HASHSIZE {
@ -36,33 +44,68 @@ pub fn merkle_hash(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
chunkz = new_chunkz; chunkz = new_chunkz;
} }
chunkz.append(&mut datalen.to_vec()); chunkz.append(&mut datalen);
hash(&chunkz) hash(&chunkz)
} }
fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) { fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) {
let chunk_size = if list.is_empty() { let chunk_size = if list.is_empty() || list[0].len() < SSZ_CHUNK_SIZE {
SSZ_CHUNK_SIZE SSZ_CHUNK_SIZE
} else if list[0].len() < SSZ_CHUNK_SIZE {
let items_per_chunk = SSZ_CHUNK_SIZE / list[0].len();
items_per_chunk * list[0].len()
} else { } else {
list[0].len() list[0].len()
}; };
let mut data = Vec::new(); let (items_per_chunk, chunk_count) = if list.is_empty() {
(1, 1)
} else {
let items_per_chunk = SSZ_CHUNK_SIZE / list[0].len();
let chunk_count = list.len() / items_per_chunk;
(items_per_chunk, chunk_count)
};
let mut chunkz = Vec::new();
if list.is_empty() { if list.is_empty() {
// handle an empty list // handle an empty list
data.append(&mut vec![0; SSZ_CHUNK_SIZE]); chunkz.append(&mut vec![0; SSZ_CHUNK_SIZE]);
} else { } else if list[0].len() <= SSZ_CHUNK_SIZE {
// just create a blob here; we'll divide into // just create a blob here; we'll divide into
// chunked slices when we merklize // chunked slices when we merklize
data.reserve(list[0].len() * list.len()); let mut chunk = Vec::with_capacity(chunk_size);
let mut item_count_in_chunk = 0;
chunkz.reserve(chunk_count * chunk_size);
for item in list.iter_mut() { for item in list.iter_mut() {
data.append(item); item_count_in_chunk += 1;
chunk.append(item);
// completed chunk?
if item_count_in_chunk == items_per_chunk {
zpad(&mut chunk, chunk_size);
chunkz.append(&mut chunk);
item_count_in_chunk = 0;
} }
} }
(chunk_size, data)
// left-over uncompleted chunk?
if item_count_in_chunk != 0 {
zpad(&mut chunk, chunk_size);
chunkz.append(&mut chunk);
}
} else {
// chunks larger than SSZ_CHUNK_SIZE
chunkz.reserve(chunk_count * chunk_size);
for item in list.iter_mut() {
chunkz.append(item);
}
}
(chunk_size, chunkz)
}
/// Right-pads 'bytes' with zeros so that it is 'size' bytes in length.
fn zpad(bytes: &mut Vec<u8>, size: usize) {
if bytes.len() < size {
bytes.resize(size, 0);
}
} }
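
To make the chunking concrete: with 32-byte items (for example, `hash_tree_root_internal` outputs) and `SSZ_CHUNK_SIZE = 128`, four items are packed per chunk and the last partial chunk is zero-padded before the blob is hashed down and combined with the length. A standalone sketch of that arithmetic (it deliberately does not call the crate's private helpers):

```
const SSZ_CHUNK_SIZE: usize = 128; // mirrors the constant above

fn main() {
    let item_len = 32;  // e.g. one hash_tree_root_internal result
    let item_count = 6;

    let items_per_chunk = SSZ_CHUNK_SIZE / item_len; // 4 items fit per chunk
    let full_chunks = item_count / items_per_chunk;  // 1 completely filled chunk
    let partial = item_count % items_per_chunk;      // 2 items left over

    assert_eq!(items_per_chunk, 4);
    assert_eq!(full_chunks, 1);
    assert_eq!(partial, 2); // -> one extra chunk, zero-padded from 64 to 128 bytes

    // Blob size before merklization: 2 chunks * 128 bytes = 256 bytes.
    let blob_len = (full_chunks + 1) * SSZ_CHUNK_SIZE;
    assert_eq!(blob_len, 256);
    // Successive hashing rounds reduce the blob to a single 32-byte value, which
    // is then hashed together with the 32-byte little-endian item count.
}
```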
#[cfg(test)] #[cfg(test)]

View File

@ -0,0 +1,14 @@
[package]
name = "ssz_derive"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
description = "Procedural derive macros for SSZ encoding and decoding."
[lib]
proc-macro = true
[dependencies]
syn = "0.15"
quote = "0.6"
ssz = { path = "../ssz" }

View File

@ -0,0 +1,128 @@
//! Provides the following procedural derive macros:
//!
//! - `#[derive(Encode)]`
//! - `#[derive(Decode)]`
//!
//! These macros provide SSZ encoding/decoding for a `struct`. Fields are encoded/decoded in the
//! order they are defined.
//!
//! Presently, only `structs` with named fields are supported. `enum`s and tuple-structs are
//! unsupported.
//!
//! Example:
//! ```
//! use ssz::{ssz_encode, Decodable};
//! use ssz_derive::{Encode, Decode};
//!
//! #[derive(Encode, Decode)]
//! struct Foo {
//! pub bar: bool,
//! pub baz: u64,
//! }
//!
//! fn main() {
//! let foo = Foo {
//! bar: true,
//! baz: 42,
//! };
//!
//! let bytes = ssz_encode(&foo);
//!
//! let (decoded_foo, _i) = Foo::ssz_decode(&bytes, 0).unwrap();
//!
//! assert_eq!(foo.baz, decoded_foo.baz);
//! }
//! ```
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput};
/// Returns a Vec of `syn::Ident` for each named field in the struct.
///
/// # Panics
/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time.
fn get_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> {
struct_data
.fields
.iter()
.map(|f| match &f.ident {
Some(ref ident) => ident,
_ => panic!("ssz_derive only supports named struct fields."),
})
.collect()
}
/// Implements `ssz::Encodable` for some `struct`.
///
/// Fields are encoded in the order they are defined.
#[proc_macro_derive(Encode)]
pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let name = &item.ident;
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("ssz_derive only supports structs."),
};
let field_idents = get_named_field_idents(&struct_data);
let output = quote! {
impl ssz::Encodable for #name {
fn ssz_append(&self, s: &mut ssz::SszStream) {
#(
s.append(&self.#field_idents);
)*
}
}
};
output.into()
}
/// Implements `ssz::Decodable` for some `struct`.
///
/// Fields are decoded in the order they are defined.
#[proc_macro_derive(Decode)]
pub fn ssz_decode_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let name = &item.ident;
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("ssz_derive only supports structs."),
};
let field_idents = get_named_field_idents(&struct_data);
// Interpolating a variable inside a `quote!` repetition consumes it, so we take two references
// (`field_idents_a` and `field_idents_b`) in order to use the same list in two repetitions.
//
// https://github.com/dtolnay/quote/issues/8
let field_idents_a = &field_idents;
let field_idents_b = &field_idents;
let output = quote! {
impl ssz::Decodable for #name {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> {
#(
let (#field_idents_a, i) = <_>::ssz_decode(bytes, i)?;
)*
Ok((
Self {
#(
#field_idents_b,
)*
},
i
))
}
}
};
output.into()
}
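
For a sense of what the derives produce, the generated impls for the doc example's `Foo` have roughly the shape of the hand-written `Encodable`/`Decodable` impls this commit deletes elsewhere. A hedged sketch of the expansion (not the literal macro output):

```
use ssz::{Decodable, DecodeError, Encodable, SszStream};

pub struct Foo {
    pub bar: bool,
    pub baz: u64,
}

// Roughly what `#[derive(Encode)]` expands to: append fields in declaration order.
impl Encodable for Foo {
    fn ssz_append(&self, s: &mut SszStream) {
        s.append(&self.bar);
        s.append(&self.baz);
    }
}

// Roughly what `#[derive(Decode)]` expands to: decode fields in the same order,
// threading the running byte offset through each call.
impl Decodable for Foo {
    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
        let (bar, i) = <_>::ssz_decode(bytes, i)?;
        let (baz, i) = <_>::ssz_decode(bytes, i)?;
        Ok((Self { bar, baz }, i))
    }
}

fn main() {
    let foo = Foo { bar: true, baz: 42 };
    let bytes = ssz::ssz_encode(&foo);
    let (decoded, _) = Foo::ssz_decode(&bytes, 0).unwrap();
    assert!(decoded.bar);
    assert_eq!(decoded.baz, 42);
}
```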

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
block_producer = { path = "../eth2/block_producer" } block_proposer = { path = "../eth2/block_proposer" }
bls = { path = "../eth2/utils/bls" } bls = { path = "../eth2/utils/bls" }
clap = "2.32.0" clap = "2.32.0"
dirs = "1.0.3" dirs = "1.0.3"

View File

@ -1,4 +1,4 @@
use block_producer::{BeaconNode, BeaconNodeError, PublishOutcome}; use block_proposer::{BeaconNode, BeaconNodeError, PublishOutcome};
use protos::services::{ use protos::services::{
BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest, BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest,
}; };

View File

@ -1,7 +1,7 @@
mod beacon_block_grpc_client; mod beacon_block_grpc_client;
// mod block_producer_service; // mod block_producer_service;
use block_producer::{ use block_proposer::{
BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer, BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer,
}; };
use slog::{error, info, warn, Logger}; use slog::{error, info, warn, Logger};

View File

@ -1,4 +1,4 @@
use block_producer::{DutiesReader, DutiesReaderError}; use block_proposer::{DutiesReader, DutiesReaderError};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::RwLock; use std::sync::RwLock;
use types::{Epoch, Slot}; use types::{Epoch, Slot};

View File

@ -1,7 +1,7 @@
use self::block_producer_service::{BeaconBlockGrpcClient, BlockProducerService}; use self::block_producer_service::{BeaconBlockGrpcClient, BlockProducerService};
use self::duties::{DutiesManager, DutiesManagerService, EpochDutiesMap}; use self::duties::{DutiesManager, DutiesManagerService, EpochDutiesMap};
use crate::config::ClientConfig; use crate::config::ClientConfig;
use block_producer::{test_utils::LocalSigner, BlockProducer}; use block_proposer::{test_utils::LocalSigner, BlockProducer};
use bls::Keypair; use bls::Keypair;
use clap::{App, Arg}; use clap::{App, Arg};
use grpcio::{ChannelBuilder, EnvBuilder}; use grpcio::{ChannelBuilder, EnvBuilder};