Merge branch 'master' into lighthouse-246
commit cd4346962a

@@ -1,9 +1,9 @@
use crate::cached_beacon_state::CachedBeaconState;
use log::trace;
use state_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet};
use types::{
beacon_state::BeaconStateError, AggregateSignature, Attestation, AttestationData, BeaconState,
Bitfield, ChainSpec, FreeAttestation, Signature,
AggregateSignature, Attestation, AttestationData, BeaconState, BeaconStateError, Bitfield,
ChainSpec, FreeAttestation, Signature,
};

const PHASE_0_CUSTODY_BIT: bool = false;
@@ -42,21 +42,28 @@ pub enum Message {
BadSignature,
/// The given `slot` does not match the validators committee assignment.
BadSlot,
/// The given `shard` does not match the validators committee assignment.
/// The given `shard` does not match the validators committee assignment, or is not included in
/// a committee for the given slot.
BadShard,
/// Attestation is from the epoch prior to this, ignoring.
TooOld,
}

macro_rules! some_or_invalid {
($expression: expr, $error: expr) => {
match $expression {
Some(x) => x,
None => {
return Ok(Outcome {
valid: false,
message: $error,
});
}
}
macro_rules! valid_outcome {
($error: expr) => {
return Ok(Outcome {
valid: true,
message: $error,
});
};
}

macro_rules! invalid_outcome {
($error: expr) => {
return Ok(Outcome {
valid: false,
message: $error,
});
};
}

@@ -77,49 +84,59 @@ impl AttestationAggregator {
/// - The signature is verified against that of the validator at `validator_index`.
pub fn process_free_attestation(
&mut self,
cached_state: &CachedBeaconState,
cached_state: &BeaconState,
free_attestation: &FreeAttestation,
spec: &ChainSpec,
) -> Result<Outcome, BeaconStateError> {
let (slot, shard, committee_index) = some_or_invalid!(
cached_state.attestation_slot_and_shard_for_validator(
free_attestation.validator_index as usize,
spec,
)?,
Message::BadValidatorIndex
let attestation_duties = match cached_state.attestation_slot_and_shard_for_validator(
free_attestation.validator_index as usize,
spec,
) {
Err(BeaconStateError::EpochCacheUninitialized(e)) => {
panic!("Attempted to access unbuilt cache {:?}.", e)
}
Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld),
Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard),
Err(e) => return Err(e),
Ok(None) => invalid_outcome!(Message::BadValidatorIndex),
Ok(Some(attestation_duties)) => attestation_duties,
};

let (slot, shard, committee_index) = attestation_duties;

trace!(
"slot: {}, shard: {}, committee_index: {}, val_index: {}",
slot,
shard,
committee_index,
free_attestation.validator_index
);

if free_attestation.data.slot != slot {
return Ok(Outcome {
valid: false,
message: Message::BadSlot,
});
invalid_outcome!(Message::BadSlot);
}
if free_attestation.data.shard != shard {
return Ok(Outcome {
valid: false,
message: Message::BadShard,
});
invalid_outcome!(Message::BadShard);
}

let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);

let validator_record = some_or_invalid!(
cached_state
.state
.validator_registry
.get(free_attestation.validator_index as usize),
Message::BadValidatorIndex
);

if !free_attestation
.signature
.verify(&signable_message, &validator_record.pubkey)
let validator_record = match cached_state
.validator_registry
.get(free_attestation.validator_index as usize)
{
return Ok(Outcome {
valid: false,
message: Message::BadSignature,
});
None => invalid_outcome!(Message::BadValidatorIndex),
Some(validator_record) => validator_record,
};

if !free_attestation.signature.verify(
&signable_message,
cached_state
.fork
.get_domain(cached_state.current_epoch(spec), spec.domain_attestation),
&validator_record.pubkey,
) {
invalid_outcome!(Message::BadSignature);
}

if let Some(existing_attestation) = self.store.get(&signable_message) {
@@ -129,15 +146,9 @@ impl AttestationAggregator {
committee_index as usize,
) {
self.store.insert(signable_message, updated_attestation);
Ok(Outcome {
valid: true,
message: Message::Aggregated,
})
valid_outcome!(Message::Aggregated);
} else {
Ok(Outcome {
valid: true,
message: Message::AggregationNotRequired,
})
valid_outcome!(Message::AggregationNotRequired);
}
} else {
let mut aggregate_signature = AggregateSignature::new();
@@ -151,10 +162,7 @@ impl AttestationAggregator {
aggregate_signature,
};
self.store.insert(signable_message, new_attestation);
Ok(Outcome {
valid: true,
message: Message::NewAttestationCreated,
})
valid_outcome!(Message::NewAttestationCreated);
}
}
@@ -1,5 +1,4 @@
use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
use crate::cached_beacon_state::CachedBeaconState;
use crate::checkpoint::CheckPoint;
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
@@ -15,10 +14,10 @@ use state_processing::{
};
use std::sync::Arc;
use types::{
beacon_state::BeaconStateError,
readers::{BeaconBlockReader, BeaconStateReader},
AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Crosslink, Deposit,
Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, Signature, Slot,
AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Deposit, Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, RelativeEpoch,
Signature, Slot,
};

#[derive(Debug, PartialEq)]
@@ -70,7 +69,6 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
canonical_head: RwLock<CheckPoint>,
finalized_head: RwLock<CheckPoint>,
pub state: RwLock<BeaconState>,
pub cached_state: RwLock<CachedBeaconState>,
pub spec: ChainSpec,
pub fork_choice: RwLock<F>,
}
@@ -96,7 +94,7 @@ where
return Err(Error::InsufficientValidators);
}

let genesis_state = BeaconState::genesis(
let mut genesis_state = BeaconState::genesis(
genesis_time,
initial_validator_deposits,
latest_eth1_data,
@@ -109,32 +107,32 @@ where
let block_root = genesis_block.canonical_root();
block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;

let cached_state = RwLock::new(CachedBeaconState::from_beacon_state(
genesis_state.clone(),
spec.clone(),
)?);

let finalized_head = RwLock::new(CheckPoint::new(
genesis_block.clone(),
block_root,
// TODO: this is a memory waste; remove full clone.
genesis_state.clone(),
state_root,
));
let canonical_head = RwLock::new(CheckPoint::new(
genesis_block.clone(),
block_root,
// TODO: this is a memory waste; remove full clone.
genesis_state.clone(),
state_root,
));
let attestation_aggregator = RwLock::new(AttestationAggregator::new());

genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?;
genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?;
genesis_state.build_epoch_cache(RelativeEpoch::Next, &spec)?;

Ok(Self {
block_store,
state_store,
slot_clock,
attestation_aggregator,
state: RwLock::new(genesis_state.clone()),
cached_state,
state: RwLock::new(genesis_state),
finalized_head,
canonical_head,
spec,
@@ -150,6 +148,10 @@ where
new_beacon_state: BeaconState,
new_beacon_state_root: Hash256,
) {
debug!(
"Updating canonical head with block at slot: {}",
new_beacon_block.slot
);
let mut head = self.canonical_head.write();
head.update(
new_beacon_block,
@@ -288,7 +290,7 @@ where
validator_index
);
if let Some((slot, shard, _committee)) = self
.cached_state
.state
.read()
.attestation_slot_and_shard_for_validator(validator_index, &self.spec)?
{
@@ -346,7 +348,7 @@ where
let aggregation_outcome = self
.attestation_aggregator
.write()
.process_free_attestation(&self.cached_state.read(), &free_attestation, &self.spec)?;
.process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?;

// return if the attestation is invalid
if !aggregation_outcome.valid {
@@ -496,17 +498,9 @@ where
// TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be
// run instead.
if self.head().beacon_block_root == parent_block_root {
self.update_canonical_head(
block.clone(),
block_root.clone(),
state.clone(),
state_root,
);
self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
// Update the local state variable.
*self.state.write() = state.clone();
// Update the cached state variable.
*self.cached_state.write() =
CachedBeaconState::from_beacon_state(state.clone(), self.spec.clone())?;
}

Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
@@ -1,150 +0,0 @@
use log::{debug, trace};
use std::collections::HashMap;
use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Epoch, Slot};

pub const CACHE_PREVIOUS: bool = false;
pub const CACHE_CURRENT: bool = true;
pub const CACHE_NEXT: bool = false;

pub type CrosslinkCommittees = Vec<(Vec<usize>, u64)>;
pub type Shard = u64;
pub type CommitteeIndex = u64;
pub type AttestationDuty = (Slot, Shard, CommitteeIndex);
pub type AttestationDutyMap = HashMap<u64, AttestationDuty>;

// TODO: CachedBeaconState is presently duplicating `BeaconState` and `ChainSpec`. This is a
// massive memory waste, switch them to references.

pub struct CachedBeaconState {
pub state: BeaconState,
committees: Vec<Vec<CrosslinkCommittees>>,
attestation_duties: Vec<AttestationDutyMap>,
next_epoch: Epoch,
current_epoch: Epoch,
previous_epoch: Epoch,
spec: ChainSpec,
}

impl CachedBeaconState {
pub fn from_beacon_state(
state: BeaconState,
spec: ChainSpec,
) -> Result<Self, BeaconStateError> {
let current_epoch = state.current_epoch(&spec);
let previous_epoch = if current_epoch == spec.genesis_epoch {
current_epoch
} else {
current_epoch.saturating_sub(1_u64)
};
let next_epoch = state.next_epoch(&spec);

let mut committees: Vec<Vec<CrosslinkCommittees>> = Vec::with_capacity(3);
let mut attestation_duties: Vec<AttestationDutyMap> = Vec::with_capacity(3);

if CACHE_PREVIOUS {
debug!("from_beacon_state: building previous epoch cache.");
let cache = build_epoch_cache(&state, previous_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}
if CACHE_CURRENT {
debug!("from_beacon_state: building current epoch cache.");
let cache = build_epoch_cache(&state, current_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}
if CACHE_NEXT {
debug!("from_beacon_state: building next epoch cache.");
let cache = build_epoch_cache(&state, next_epoch, &spec)?;
committees.push(cache.committees);
attestation_duties.push(cache.attestation_duty_map);
} else {
committees.push(vec![]);
attestation_duties.push(HashMap::new());
}

Ok(Self {
state,
committees,
attestation_duties,
next_epoch,
current_epoch,
previous_epoch,
spec,
})
}

fn slot_to_cache_index(&self, slot: Slot) -> Option<usize> {
trace!("slot_to_cache_index: cache lookup");
match slot.epoch(self.spec.epoch_length) {
epoch if (epoch == self.previous_epoch) & CACHE_PREVIOUS => Some(0),
epoch if (epoch == self.current_epoch) & CACHE_CURRENT => Some(1),
epoch if (epoch == self.next_epoch) & CACHE_NEXT => Some(2),
_ => None,
}
}

/// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an
/// attestation.
///
/// Cached method.
///
/// Spec v0.2.0
pub fn attestation_slot_and_shard_for_validator(
&self,
validator_index: usize,
_spec: &ChainSpec,
) -> Result<Option<(Slot, u64, u64)>, BeaconStateError> {
// Get the result for this epoch.
let cache_index = self
.slot_to_cache_index(self.state.slot)
.expect("Current epoch should always have a cache index.");

let duties = self.attestation_duties[cache_index]
.get(&(validator_index as u64))
.and_then(|tuple| Some(*tuple));

Ok(duties)
}
}

struct EpochCacheResult {
committees: Vec<CrosslinkCommittees>,
attestation_duty_map: AttestationDutyMap,
}

fn build_epoch_cache(
state: &BeaconState,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<EpochCacheResult, BeaconStateError> {
let mut epoch_committees: Vec<CrosslinkCommittees> =
Vec::with_capacity(spec.epoch_length as usize);
let mut attestation_duty_map: AttestationDutyMap = HashMap::new();

for slot in epoch.slot_iter(spec.epoch_length) {
let slot_committees = state.get_crosslink_committees_at_slot(slot, false, spec)?;

for (committee, shard) in slot_committees {
for (committee_index, validator_index) in committee.iter().enumerate() {
attestation_duty_map.insert(
*validator_index as u64,
(slot, shard, committee_index as u64),
);
}
}

epoch_committees.push(state.get_crosslink_committees_at_slot(slot, false, spec)?)
}

Ok(EpochCacheResult {
committees: epoch_committees,
attestation_duty_map,
})
}
@@ -3,7 +3,7 @@ use types::{BeaconBlock, BeaconState, Hash256};

/// Represents some block and it's associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head.
#[derive(PartialEq, Clone, Serialize)]
#[derive(Clone, Serialize)]
pub struct CheckPoint {
pub beacon_block: BeaconBlock,
pub beacon_block_root: Hash256,
@@ -1,8 +1,9 @@
mod attestation_aggregator;
mod beacon_chain;
mod cached_beacon_state;
mod checkpoint;

pub use self::beacon_chain::{BeaconChain, Error};
pub use self::beacon_chain::{
BeaconChain, BlockProcessingOutcome, Error, InvalidBlock, ValidBlock,
};
pub use self::checkpoint::CheckPoint;
pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError};
@@ -1,5 +1,5 @@
use super::ValidatorHarness;
use beacon_chain::BeaconChain;
use beacon_chain::{BeaconChain, BlockProcessingOutcome};
pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
use bls::create_proof_of_possession;
use db::{
@@ -157,7 +157,7 @@ impl BeaconChainHarness {
.beacon_chain
.state
.read()
.get_crosslink_committees_at_slot(present_slot, false, &self.spec)
.get_crosslink_committees_at_slot(present_slot, &self.spec)
.unwrap()
.iter()
.fold(vec![], |mut acc, (committee, _slot)| {
@@ -223,7 +223,10 @@ impl BeaconChainHarness {
debug!("Producing block...");
let block = self.produce_block();
debug!("Submitting block for processing...");
self.beacon_chain.process_block(block).unwrap();
match self.beacon_chain.process_block(block) {
Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
other => panic!("block processing failed with {:?}", other),
};
debug!("...block processed by BeaconChain.");

debug!("Producing free attestations...");
@@ -242,6 +245,10 @@ impl BeaconChainHarness {
debug!("Free attestations processed.");
}

pub fn run_fork_choice(&mut self) {
self.beacon_chain.fork_choice().unwrap()
}

/// Dump all blocks and states from the canonical beacon chain.
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
self.beacon_chain.chain_dump()
@@ -25,23 +25,23 @@ impl LocalSigner {
}

/// Sign some message.
fn bls_sign(&self, message: &[u8]) -> Option<Signature> {
Some(Signature::new(message, &self.keypair.sk))
fn bls_sign(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.keypair.sk))
}
}

impl BlockProposerSigner for LocalSigner {
fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
self.bls_sign(message)
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}

fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
self.bls_sign(message)
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}
}

impl AttesterSigner for LocalSigner {
fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature> {
self.bls_sign(message)
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}
}
@@ -35,6 +35,9 @@ fn it_can_produce_past_first_epoch_boundary() {
harness.advance_chain_with_block();
debug!("Produced block {}/{}.", i + 1, blocks);
}

harness.run_fork_choice();

let dump = harness.chain_dump().expect("Chain dump failed.");

assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.
@@ -10,6 +10,7 @@ pub use self::traits::{
};

const PHASE_0_CUSTODY_BIT: bool = false;
const DOMAIN_ATTESTATION: u64 = 1;

#[derive(Debug, PartialEq)]
pub enum PollOutcome {
@@ -136,8 +137,10 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V,
fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
self.store_produce(attestation_data);

self.signer
.sign_attestation_message(&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..])
self.signer.sign_attestation_message(
&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..],
DOMAIN_ATTESTATION,
)
}

/// Returns `true` if signing some attestation_data is safe (non-slashable).
@@ -25,7 +25,7 @@ impl LocalSigner {
}

impl Signer for LocalSigner {
fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature> {
Some(Signature::new(message, &self.keypair.sk))
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.keypair.sk))
}
}
@@ -45,5 +45,5 @@ pub trait DutiesReader: Send + Sync {

/// Signs message using an internally-maintained private key.
pub trait Signer {
fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature>;
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature>;
}
@@ -134,7 +134,10 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
// TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
let message = int_to_bytes32(slot.epoch(self.spec.epoch_length).as_u64());

match self.signer.sign_randao_reveal(&message) {
match self
.signer
.sign_randao_reveal(&message, self.spec.domain_randao)
{
None => return Ok(PollOutcome::SignerRejection(slot)),
Some(signature) => signature,
}
@@ -166,10 +169,10 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
fn sign_block(&mut self, mut block: BeaconBlock) -> Option<BeaconBlock> {
self.store_produce(&block);

match self
.signer
.sign_block_proposal(&block.proposal_root(&self.spec)[..])
{
match self.signer.sign_block_proposal(
&block.proposal_root(&self.spec)[..],
self.spec.domain_proposal,
) {
None => None,
Some(signature) => {
block.signature = signature;
@@ -25,11 +25,11 @@ impl LocalSigner {
}

impl Signer for LocalSigner {
fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
Some(Signature::new(message, &self.keypair.sk))
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.keypair.sk))
}

fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
Some(Signature::new(message, &self.keypair.sk))
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.keypair.sk))
}
}
@@ -44,6 +44,6 @@ pub trait DutiesReader: Send + Sync {

/// Signs message using an internally-maintained private key.
pub trait Signer {
fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature>;
fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature>;
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature>;
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature>;
}
eth2/fork_choice/src/protolambda_lmd_ghost.rs (new file)
@@ -0,0 +1 @@
@@ -4,9 +4,8 @@ use int_to_bytes::int_to_bytes32;
use log::{debug, trace};
use ssz::{ssz_encode, TreeHash};
use types::{
beacon_state::{AttestationParticipantsError, BeaconStateError},
AggregatePublicKey, Attestation, BeaconBlock, BeaconState, ChainSpec, Crosslink, Epoch, Exit,
Fork, Hash256, PendingAttestation, PublicKey, Signature,
AggregatePublicKey, Attestation, BeaconBlock, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Epoch, Exit, Fork, Hash256, PendingAttestation, PublicKey, RelativeEpoch, Signature,
};

// TODO: define elsehwere.
@@ -27,7 +26,6 @@ pub enum Error {
MissingBeaconBlock(Hash256),
InvalidBeaconBlock(Hash256),
MissingParentBlock(Hash256),
NoBlockProducer,
StateSlotMismatch,
BadBlockSignature,
BadRandaoSignature,
@@ -56,7 +54,7 @@ pub enum AttestationValidationError {
BadSignature,
ShardBlockRootNotZero,
NoBlockRoot,
AttestationParticipantsError(AttestationParticipantsError),
BeaconStateError(BeaconStateError),
}

macro_rules! ensure {
@@ -98,12 +96,15 @@ fn per_block_processing_signature_optional(
) -> Result<(), Error> {
ensure!(block.slot == state.slot, Error::StateSlotMismatch);

// Building the previous epoch could be delayed until an attestation from a previous epoch is
// included. This is left for future optimisation.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?;

/*
* Proposer Signature
*/
let block_proposer_index = state
.get_beacon_proposer_index(block.slot, spec)
.map_err(|_| Error::NoBlockProducer)?;
let block_proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
let block_proposer = &state.validator_registry[block_proposer_index];

if verify_block_signature {
@@ -361,6 +362,12 @@ fn validate_attestation_signature_optional(
&attestation.aggregation_bitfield,
spec,
)?;
trace!(
"slot: {}, shard: {}, participants: {:?}",
attestation.data.slot,
attestation.data.shard,
participants
);
let mut group_public_key = AggregatePublicKey::new();
for participant in participants {
group_public_key.add(
@@ -389,14 +396,12 @@ fn validate_attestation_signature_optional(
Ok(())
}

fn get_domain(_fork: &Fork, _epoch: Epoch, _domain_type: u64) -> u64 {
// TODO: stubbed out.
0
fn get_domain(fork: &Fork, epoch: Epoch, domain_type: u64) -> u64 {
fork.get_domain(epoch, domain_type)
}

fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool {
// TODO: add domain
signature.verify(message, pubkey)
fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, domain: u64) -> bool {
signature.verify(message, domain, pubkey)
}

impl From<AttestationValidationError> for Error {
@@ -417,8 +422,8 @@ impl From<SlotProcessingError> for Error {
}
}

impl From<AttestationParticipantsError> for AttestationValidationError {
fn from(e: AttestationParticipantsError) -> AttestationValidationError {
AttestationValidationError::AttestationParticipantsError(e)
impl From<BeaconStateError> for AttestationValidationError {
fn from(e: BeaconStateError) -> AttestationValidationError {
AttestationValidationError::BeaconStateError(e)
}
}
@@ -5,9 +5,8 @@ use ssz::TreeHash;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use types::{
beacon_state::{AttestationParticipantsError, BeaconStateError, InclusionError},
validator_registry::get_active_validator_indices,
BeaconState, ChainSpec, Crosslink, Epoch, Hash256, PendingAttestation,
validator_registry::get_active_validator_indices, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Epoch, Hash256, InclusionError, PendingAttestation, RelativeEpoch,
};

macro_rules! safe_add_assign {
@@ -28,7 +27,6 @@ pub enum Error {
BaseRewardQuotientIsZero,
NoRandaoSeed,
BeaconStateError(BeaconStateError),
AttestationParticipantsError(AttestationParticipantsError),
InclusionError(InclusionError),
WinningRootError(WinningRootError),
}
@@ -36,7 +34,7 @@ pub enum Error {
#[derive(Debug, PartialEq)]
pub enum WinningRootError {
NoWinningRoot,
AttestationParticipantsError(AttestationParticipantsError),
BeaconStateError(BeaconStateError),
}

#[derive(Clone)]
@@ -66,6 +64,11 @@ impl EpochProcessable for BeaconState {
self.current_epoch(spec)
);

// Ensure all of the caches are built.
self.build_epoch_cache(RelativeEpoch::Previous, spec)?;
self.build_epoch_cache(RelativeEpoch::Current, spec)?;
self.build_epoch_cache(RelativeEpoch::Next, spec)?;

/*
* Validators attesting during the current epoch.
*/
@@ -322,8 +325,11 @@ impl EpochProcessable for BeaconState {
slot,
slot.epoch(spec.epoch_length)
);

// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, false, spec)?;
self.get_crosslink_committees_at_slot(slot, spec)?.clone();

for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
@@ -499,8 +505,10 @@ impl EpochProcessable for BeaconState {
* Crosslinks
*/
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, false, spec)?;
self.get_crosslink_committees_at_slot(slot, spec)?.clone();

for (_crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
@@ -609,6 +617,12 @@ impl EpochProcessable for BeaconState {
.cloned()
.collect();

/*
* Manage the beacon state caches
*/
self.advance_caches();
self.build_epoch_cache(RelativeEpoch::Next, spec)?;

debug!("Epoch transition complete.");

Ok(())
@@ -644,20 +658,18 @@ fn winning_root(
continue;
}

// TODO: `cargo fmt` makes this rather ugly; tidy up.
let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result<
_,
AttestationParticipantsError,
>>(vec![], |mut acc, a| {
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
acc.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
Ok(acc)
})?;
let attesting_validator_indices = attestations
.iter()
.try_fold::<_, _, Result<_, BeaconStateError>>(vec![], |mut acc, a| {
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
acc.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
Ok(acc)
})?;

let total_balance: u64 = attesting_validator_indices
.iter()
@@ -708,15 +720,9 @@ impl From<BeaconStateError> for Error {
}
}

impl From<AttestationParticipantsError> for Error {
fn from(e: AttestationParticipantsError) -> Error {
Error::AttestationParticipantsError(e)
}
}

impl From<AttestationParticipantsError> for WinningRootError {
fn from(e: AttestationParticipantsError) -> WinningRootError {
WinningRootError::AttestationParticipantsError(e)
impl From<BeaconStateError> for WinningRootError {
fn from(e: BeaconStateError) -> WinningRootError {
WinningRootError::BeaconStateError(e)
}
}
@@ -1,5 +1,5 @@
use crate::{EpochProcessable, EpochProcessingError};
use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Hash256};
use types::{BeaconState, BeaconStateError, ChainSpec, Hash256};

#[derive(Debug, PartialEq)]
pub enum Error {
@@ -27,11 +27,13 @@ impl Attestation {
&self,
group_public_key: &AggregatePublicKey,
custody_bit: bool,
// TODO: use domain.
_domain: u64,
domain: u64,
) -> bool {
self.aggregate_signature
.verify(&self.signable_message(custody_bit), group_public_key)
self.aggregate_signature.verify(
&self.signable_message(custody_bit),
domain,
group_public_key,
)
}
}
@@ -1,24 +1,46 @@
use self::epoch_cache::EpochCache;
use crate::test_utils::TestRandom;
use crate::{
validator::StatusFlags, validator_registry::get_active_validator_indices, AttestationData,
Bitfield, ChainSpec, Crosslink, Deposit, Epoch, Eth1Data, Eth1DataVote, Fork, Hash256,
PendingAttestation, PublicKey, Signature, Slot, Validator,
Bitfield, ChainSpec, Crosslink, Deposit, DepositData, DepositInput, Epoch, Eth1Data,
Eth1DataVote, Fork, Hash256, PendingAttestation, PublicKey, Signature, Slot, Validator,
};
use bls::verify_proof_of_possession;
use honey_badger_split::SplitExt;
use log::trace;
use log::{debug, trace};
use rand::RngCore;
use serde_derive::Serialize;
use ssz::{hash, TreeHash};
use ssz_derive::{Decode, Encode};
use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
use std::collections::HashMap;
use swap_or_not_shuffle::get_permutated_index;
use test_random_derive::TestRandom;

mod epoch_cache;
mod tests;

pub type Committee = Vec<usize>;
pub type CrosslinkCommittees = Vec<(Committee, u64)>;
pub type Shard = u64;
pub type CommitteeIndex = u64;
pub type AttestationDuty = (Slot, Shard, CommitteeIndex);
pub type AttestationDutyMap = HashMap<u64, AttestationDuty>;
pub type ShardCommitteeIndexMap = HashMap<Shard, (usize, usize)>;

pub const CACHED_EPOCHS: usize = 3;

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum RelativeEpoch {
Previous,
Current,
Next,
}

#[derive(Debug, PartialEq)]
pub enum BeaconStateError {
pub enum Error {
EpochOutOfBounds,
/// The supplied shard is unknown. It may be larger than the maximum shard count, or not in a
/// committee for the given slot.
ShardOutOfBounds,
UnableToShuffle,
InsufficientRandaoMixes,
InsufficientValidators,
@@ -26,20 +48,14 @@ pub enum BeaconStateError {
InsufficientIndexRoots,
InsufficientAttestations,
InsufficientCommittees,
EpochCacheUninitialized(RelativeEpoch),
}

#[derive(Debug, PartialEq)]
pub enum InclusionError {
/// The validator did not participate in an attestation in this period.
NoAttestationsForValidator,
AttestationParticipantsError(AttestationParticipantsError),
}

#[derive(Debug, PartialEq)]
pub enum AttestationParticipantsError {
/// There is no committee for the given shard in the given epoch.
NoCommitteeForShard,
BeaconStateError(BeaconStateError),
Error(Error),
}

macro_rules! safe_add_assign {
@@ -53,7 +69,7 @@ macro_rules! safe_sub_assign {
};
}

#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TestRandom)]
#[derive(Debug, PartialEq, Clone, Default, Serialize)]
pub struct BeaconState {
// Misc
pub slot: Slot,
@@ -91,6 +107,10 @@ pub struct BeaconState {
// Ethereum 1.0 chain data
pub latest_eth1_data: Eth1Data,
pub eth1_data_votes: Vec<Eth1DataVote>,

// Caching
pub cache_index_offset: usize,
pub caches: Vec<EpochCache>,
}

impl BeaconState {
@@ -100,7 +120,8 @@ impl BeaconState {
initial_validator_deposits: Vec<Deposit>,
latest_eth1_data: Eth1Data,
spec: &ChainSpec,
) -> Result<BeaconState, BeaconStateError> {
) -> Result<BeaconState, Error> {
debug!("Creating genesis state.");
let initial_crosslink = Crosslink {
epoch: spec.genesis_epoch,
shard_block_root: spec.zero_hash,
@@ -159,17 +180,22 @@ impl BeaconState {
*/
latest_eth1_data,
eth1_data_votes: vec![],

/*
* Caching (not in spec)
*/
cache_index_offset: 0,
caches: vec![EpochCache::empty(); CACHED_EPOCHS],
};

for deposit in initial_validator_deposits {
let _index = genesis_state.process_deposit(
deposit.deposit_data.deposit_input.pubkey,
deposit.deposit_data.amount,
deposit.deposit_data.deposit_input.proof_of_possession,
deposit.deposit_data.deposit_input.withdrawal_credentials,
spec,
);
}
let deposit_data = initial_validator_deposits
.iter()
.map(|deposit| &deposit.deposit_data)
.collect();

genesis_state.process_deposits(deposit_data, spec);

trace!("Processed genesis deposits.");

for validator_index in 0..genesis_state.validator_registry.len() {
if genesis_state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount
@@ -189,6 +215,99 @@ impl BeaconState {
Ok(genesis_state)
}

/// Build an epoch cache, unless it is has already been built.
pub fn build_epoch_cache(
&mut self,
relative_epoch: RelativeEpoch,
spec: &ChainSpec,
) -> Result<(), Error> {
let cache_index = self.cache_index(relative_epoch);

if self.caches[cache_index].initialized {
Ok(())
} else {
self.force_build_epoch_cache(relative_epoch, spec)
}
}

/// Always builds an epoch cache, even if it is already initialized.
pub fn force_build_epoch_cache(
&mut self,
relative_epoch: RelativeEpoch,
spec: &ChainSpec,
) -> Result<(), Error> {
let epoch = self.absolute_epoch(relative_epoch, spec);
let cache_index = self.cache_index(relative_epoch);

self.caches[cache_index] = EpochCache::initialized(&self, epoch, spec)?;

Ok(())
}

/// Converts a `RelativeEpoch` into an `Epoch` with respect to the epoch of this state.
fn absolute_epoch(&self, relative_epoch: RelativeEpoch, spec: &ChainSpec) -> Epoch {
match relative_epoch {
RelativeEpoch::Previous => self.previous_epoch(spec),
RelativeEpoch::Current => self.current_epoch(spec),
RelativeEpoch::Next => self.next_epoch(spec),
}
}

/// Converts an `Epoch` into a `RelativeEpoch` with respect to the epoch of this state.
///
/// Returns an error if the given `epoch` not "previous", "current" or "next" compared to the
/// epoch of this tate.
fn relative_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Result<RelativeEpoch, Error> {
match epoch {
e if e == self.current_epoch(spec) => Ok(RelativeEpoch::Current),
e if e == self.previous_epoch(spec) => Ok(RelativeEpoch::Previous),
e if e == self.next_epoch(spec) => Ok(RelativeEpoch::Next),
_ => Err(Error::EpochOutOfBounds),
}
}

/// Advances the cache for this state into the next epoch.
///
/// This should be used if the `slot` of this state is advanced beyond an epoch boundary.
///
/// The `Next` cache becomes the `Current` and the `Current` cache becomes the `Previous`. The
/// `Previous` cache is abandoned.
///
/// Care should be taken to update the `Current` epoch in case a registry update is performed
/// -- `Next` epoch is always _without_ a registry change. If you perform a registry update,
/// you should rebuild the `Current` cache so it uses the new seed.
pub fn advance_caches(&mut self) {
let previous_cache_index = self.cache_index(RelativeEpoch::Previous);

self.caches[previous_cache_index] = EpochCache::empty();

self.cache_index_offset += 1;
self.cache_index_offset %= CACHED_EPOCHS;
}

/// Returns the index of `self.caches` for some `RelativeEpoch`.
fn cache_index(&self, relative_epoch: RelativeEpoch) -> usize {
let base_index = match relative_epoch {
RelativeEpoch::Current => 1,
RelativeEpoch::Previous => 0,
RelativeEpoch::Next => 2,
};

(base_index + self.cache_index_offset) % CACHED_EPOCHS
}

/// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been
/// initialized.
fn cache(&self, relative_epoch: RelativeEpoch) -> Result<&EpochCache, Error> {
let cache = &self.caches[self.cache_index(relative_epoch)];

if cache.initialized {
Ok(cache)
} else {
Err(Error::EpochCacheUninitialized(relative_epoch))
}
}

/// Return the tree hash root for this `BeaconState`.
///
/// Spec v0.2.0
@@ -256,11 +375,12 @@ impl BeaconState {
}

/// Shuffle ``validators`` into crosslink committees seeded by ``seed`` and ``epoch``.
///
/// Return a list of ``committees_per_epoch`` committees where each
/// committee is itself a list of validator indices.
///
/// Spec v0.1
pub fn get_shuffling(
/// Spec v0.2.0
pub(crate) fn get_shuffling(
&self,
seed: Hash256,
epoch: Epoch,
@@ -273,11 +393,6 @@ impl BeaconState {
return None;
}

trace!(
"get_shuffling: active_validator_indices.len() == {}",
active_validator_indices.len()
);

let committees_per_epoch =
self.get_epoch_committee_count(active_validator_indices.len(), spec);

@@ -333,6 +448,9 @@ impl BeaconState {
self.get_epoch_committee_count(current_active_validators.len(), spec)
}

/// Return the index root at a recent `epoch`.
///
/// Spec v0.2.0
pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Option<Hash256> {
let current_epoch = self.current_epoch(spec);

@@ -341,38 +459,26 @@ impl BeaconState {
+ 1;
let latest_index_root = current_epoch + spec.entry_exit_delay;

trace!(
"get_active_index_root: epoch: {}, earliest: {}, latest: {}",
epoch,
earliest_index_root,
latest_index_root
);

if (epoch >= earliest_index_root) & (epoch <= latest_index_root) {
Some(self.latest_index_roots[epoch.as_usize() % spec.latest_index_roots_length])
} else {
trace!("get_active_index_root: epoch out of range.");
None
}
}

/// Generate a seed for the given ``epoch``.
/// Generate a seed for the given `epoch`.
///
/// Spec v0.2.0
pub fn generate_seed(
&self,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<Hash256, BeaconStateError> {
pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result<Hash256, Error> {
let mut input = self
.get_randao_mix(epoch, spec)
.ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?
.ok_or_else(|| Error::InsufficientRandaoMixes)?
.to_vec();

input.append(
&mut self
.get_active_index_root(epoch, spec)
.ok_or_else(|| BeaconStateError::InsufficientIndexRoots)?
.ok_or_else(|| Error::InsufficientIndexRoots)?
.to_vec(),
);
@@ -382,85 +488,138 @@ impl BeaconState {
Ok(Hash256::from(&hash(&input[..])[..]))
}

/// Return the list of ``(committee, shard)`` tuples for the ``slot``.
/// Returns the crosslink committees for some slot.
///
/// Note: There are two possible shufflings for crosslink committees for a
/// `slot` in the next epoch: with and without a `registry_change`
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
///
/// Spec v0.2.0
pub fn get_crosslink_committees_at_slot(
&self,
slot: Slot,
spec: &ChainSpec,
) -> Result<&CrosslinkCommittees, Error> {
let epoch = slot.epoch(spec.epoch_length);
let relative_epoch = self.relative_epoch(epoch, spec)?;
let cache = self.cache(relative_epoch)?;

let slot_offset = slot - epoch.start_slot(spec.epoch_length);

Ok(&cache.committees[slot_offset.as_usize()])
}

/// Returns the crosslink committees for some slot.
///
/// Utilizes the cache and will fail if the appropriate cache is not initialized.
///
/// Spec v0.2.0
pub(crate) fn get_shuffling_for_slot(
&self,
slot: Slot,
registry_change: bool,

spec: &ChainSpec,
) -> Result<Vec<Vec<usize>>, Error> {
let (_committees_per_epoch, seed, shuffling_epoch, _shuffling_start_shard) =
self.get_committee_params_at_slot(slot, registry_change, spec)?;

self.get_shuffling(seed, shuffling_epoch, spec)
.ok_or_else(|| Error::UnableToShuffle)
}

/// Returns the following params for the given slot:
///
/// - epoch committee count
/// - epoch seed
/// - calculation epoch
/// - start shard
///
/// In the spec, this functionality is included in the `get_crosslink_committees_at_slot(..)`
/// function. It is separated here to allow the division of shuffling and committee building,
/// as is required for efficient operations.
///
/// Spec v0.2.0
pub(crate) fn get_committee_params_at_slot(
&self,
slot: Slot,
registry_change: bool,
spec: &ChainSpec,
) -> Result<Vec<(Vec<usize>, u64)>, BeaconStateError> {
) -> Result<(u64, Hash256, Epoch, u64), Error> {
let epoch = slot.epoch(spec.epoch_length);
let current_epoch = self.current_epoch(spec);
let previous_epoch = self.previous_epoch(spec);
let next_epoch = self.next_epoch(spec);

let (committees_per_epoch, seed, shuffling_epoch, shuffling_start_shard) =
if epoch == current_epoch {
trace!("get_crosslink_committees_at_slot: current_epoch");
if epoch == current_epoch {
trace!("get_committee_params_at_slot: current_epoch");
Ok((
self.get_current_epoch_committee_count(spec),
self.current_epoch_seed,
self.current_calculation_epoch,
self.current_epoch_start_shard,
))
} else if epoch == previous_epoch {
trace!("get_committee_params_at_slot: previous_epoch");
Ok((
self.get_previous_epoch_committee_count(spec),
self.previous_epoch_seed,
self.previous_calculation_epoch,
self.previous_epoch_start_shard,
))
} else if epoch == next_epoch {
trace!("get_committee_params_at_slot: next_epoch");
let current_committees_per_epoch = self.get_current_epoch_committee_count(spec);
let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch;
let (seed, shuffling_start_shard) = if registry_change {
let next_seed = self.generate_seed(next_epoch, spec)?;
(
self.get_current_epoch_committee_count(spec),
self.current_epoch_seed,
self.current_calculation_epoch,
self.current_epoch_start_shard,
)
} else if epoch == previous_epoch {
trace!("get_crosslink_committees_at_slot: previous_epoch");
(
self.get_previous_epoch_committee_count(spec),
self.previous_epoch_seed,
self.previous_calculation_epoch,
self.previous_epoch_start_shard,
)
} else if epoch == next_epoch {
trace!("get_crosslink_committees_at_slot: next_epoch");
let current_committees_per_epoch = self.get_current_epoch_committee_count(spec);
let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch;
let (seed, shuffling_start_shard) = if registry_change {
let next_seed = self.generate_seed(next_epoch, spec)?;
(
next_seed,
(self.current_epoch_start_shard + current_committees_per_epoch)
% spec.shard_count,
)
} else if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
let next_seed = self.generate_seed(next_epoch, spec)?;
(next_seed, self.current_epoch_start_shard)
} else {
(self.current_epoch_seed, self.current_epoch_start_shard)
};
(
self.get_next_epoch_committee_count(spec),
seed,
next_epoch,
shuffling_start_shard,
next_seed,
(self.current_epoch_start_shard + current_committees_per_epoch)
% spec.shard_count,
)
} else if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
let next_seed = self.generate_seed(next_epoch, spec)?;
(next_seed, self.current_epoch_start_shard)
} else {
return Err(BeaconStateError::EpochOutOfBounds);
(self.current_epoch_seed, self.current_epoch_start_shard)
};
Ok((
self.get_next_epoch_committee_count(spec),
seed,
next_epoch,
shuffling_start_shard,
))
} else {
Err(Error::EpochOutOfBounds)
}
}

/// Return the list of ``(committee, shard)`` tuples for the ``slot``.
///
/// Note: There are two possible shufflings for crosslink committees for a
/// `slot` in the next epoch: with and without a `registry_change`
///
/// Note: does not utilize the cache, `get_crosslink_committees_at_slot` is an equivalent
/// function which uses the cache.
///
/// Spec v0.2.0
pub(crate) fn calculate_crosslink_committees_at_slot(
&self,
slot: Slot,
registry_change: bool,
shuffling: Vec<Vec<usize>>,
spec: &ChainSpec,
) -> Result<Vec<(Vec<usize>, u64)>, Error> {
let (committees_per_epoch, _seed, _shuffling_epoch, shuffling_start_shard) =
self.get_committee_params_at_slot(slot, registry_change, spec)?;

let shuffling = self
.get_shuffling(seed, shuffling_epoch, spec)
.ok_or_else(|| BeaconStateError::UnableToShuffle)?;
let offset = slot.as_u64() % spec.epoch_length;
let committees_per_slot = committees_per_epoch / spec.epoch_length;
let slot_start_shard =
(shuffling_start_shard + committees_per_slot * offset) % spec.shard_count;

trace!(
"get_crosslink_committees_at_slot: committees_per_slot: {}, slot_start_shard: {}, seed: {}",
committees_per_slot,
slot_start_shard,
seed
);

let mut crosslinks_at_slot = vec![];
for i in 0..committees_per_slot {
let tuple = (
|
||||
/// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an
|
||||
/// attestation.
|
||||
///
|
||||
/// Only reads the current epoch.
|
||||
///
|
||||
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
|
||||
///
|
||||
/// Spec v0.2.0
|
||||
pub fn attestation_slot_and_shard_for_validator(
|
||||
&self,
|
||||
validator_index: usize,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Option<(Slot, u64, u64)>, BeaconStateError> {
|
||||
let mut result = None;
|
||||
for slot in self.current_epoch(spec).slot_iter(spec.epoch_length) {
|
||||
for (committee, shard) in self.get_crosslink_committees_at_slot(slot, false, spec)? {
|
||||
if let Some(committee_index) = committee.iter().position(|&i| i == validator_index)
|
||||
{
|
||||
result = Some((slot, shard, committee_index as u64));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(result)
|
||||
_spec: &ChainSpec,
|
||||
) -> Result<Option<(Slot, u64, u64)>, Error> {
|
||||
let cache = self.cache(RelativeEpoch::Current)?;
|
||||
|
||||
Ok(cache
|
||||
.attestation_duty_map
|
||||
.get(&(validator_index as u64))
|
||||
.and_then(|tuple| Some(*tuple)))
|
||||
}
|
||||
|
||||
/// An entry or exit triggered in the ``epoch`` given by the input takes effect at
|
||||
@ -506,12 +665,8 @@ impl BeaconState {
|
||||
/// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned.
|
||||
///
|
||||
/// Spec v0.2.0
|
||||
pub fn get_beacon_proposer_index(
|
||||
&self,
|
||||
slot: Slot,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<usize, BeaconStateError> {
|
||||
let committees = self.get_crosslink_committees_at_slot(slot, false, spec)?;
|
||||
pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
|
||||
let committees = self.get_crosslink_committees_at_slot(slot, spec)?;
|
||||
trace!(
|
||||
"get_beacon_proposer_index: slot: {}, committees_count: {}",
|
||||
slot,
|
||||
@ -519,11 +674,12 @@ impl BeaconState {
|
||||
);
|
||||
committees
|
||||
.first()
|
||||
.ok_or(BeaconStateError::InsufficientValidators)
|
||||
.ok_or(Error::InsufficientValidators)
|
||||
.and_then(|(first_committee, _)| {
|
||||
let index = (slot.as_usize())
|
||||
let index = slot
|
||||
.as_usize()
|
||||
.checked_rem(first_committee.len())
|
||||
.ok_or(BeaconStateError::InsufficientValidators)?;
|
||||
.ok_or(Error::InsufficientValidators)?;
|
||||
Ok(first_committee[index])
|
||||
})
|
||||
}
|
||||
@@ -637,8 +793,72 @@ impl BeaconState {

self.validator_registry_update_epoch = current_epoch;
}

/// Confirm validator owns PublicKey
///
/// Spec v0.2.0
pub fn validate_proof_of_possession(
&self,
pubkey: PublicKey,
proof_of_possession: Signature,
withdrawal_credentials: Hash256,
spec: &ChainSpec,
) -> bool {
let proof_of_possession_data = DepositInput {
pubkey: pubkey.clone(),
withdrawal_credentials,
proof_of_possession: Signature::empty_signature(),
};

proof_of_possession.verify(
&proof_of_possession_data.hash_tree_root(),
self.fork
.get_domain(self.slot.epoch(spec.epoch_length), spec.domain_deposit),
&pubkey,
)
}

/// Process multiple deposits in sequence.
///
/// Builds a hashmap of validator pubkeys to validator index and passes it to each successive
/// call to `process_deposit(..)`. This requires much less computation than successive calls to
/// `process_deposits(..)` without the hashmap.
///
/// Spec v0.2.0
pub fn process_deposits(
&mut self,
deposits: Vec<&DepositData>,
spec: &ChainSpec,
) -> Vec<usize> {
let mut added_indices = vec![];
let mut pubkey_map: HashMap<PublicKey, usize> = HashMap::new();

for (i, validator) in self.validator_registry.iter().enumerate() {
pubkey_map.insert(validator.pubkey.clone(), i);
}

for deposit_data in deposits {
let result = self.process_deposit(
deposit_data.deposit_input.pubkey.clone(),
deposit_data.amount,
deposit_data.deposit_input.proof_of_possession.clone(),
deposit_data.deposit_input.withdrawal_credentials,
Some(&pubkey_map),
spec,
);
if let Ok(index) = result {
added_indices.push(index);
}
}
added_indices
}

/// Process a validator deposit, returning the validator index if the deposit is valid.
///
/// Optionally accepts a hashmap of all validator pubkeys to their validator index. Without
/// this hashmap, each call to `process_deposits` requires an iteration though
/// `self.validator_registry`. This becomes highly inefficient at scale.
///
/// Spec v0.2.0
pub fn process_deposit(
&mut self,
|
||||
amount: u64,
|
||||
proof_of_possession: Signature,
|
||||
withdrawal_credentials: Hash256,
|
||||
pubkey_map: Option<&HashMap<PublicKey, usize>>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<usize, ()> {
|
||||
// TODO: ensure verify proof-of-possession represents the spec accurately.
|
||||
if !verify_proof_of_possession(&proof_of_possession, &pubkey) {
|
||||
// TODO: update proof of possession to function written above (
|
||||
// requires bls::create_proof_of_possession to be updated
|
||||
// https://github.com/sigp/lighthouse/issues/239
|
||||
if !verify_proof_of_possession(&proof_of_possession, &pubkey)
|
||||
//if !self.validate_proof_of_possession(
|
||||
// pubkey.clone(),
|
||||
// proof_of_possession,
|
||||
// withdrawal_credentials,
|
||||
// &spec,
|
||||
// )
|
||||
{
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if let Some(index) = self
|
||||
.validator_registry
|
||||
.iter()
|
||||
.position(|v| v.pubkey == pubkey)
|
||||
{
|
||||
let validator_index = if let Some(pubkey_map) = pubkey_map {
|
||||
pubkey_map.get(&pubkey).and_then(|i| Some(*i))
|
||||
} else {
|
||||
self.validator_registry
|
||||
.iter()
|
||||
.position(|v| v.pubkey == pubkey)
|
||||
};
|
||||
|
||||
if let Some(index) = validator_index {
|
||||
if self.validator_registry[index].withdrawal_credentials == withdrawal_credentials {
|
||||
safe_add_assign!(self.validator_balances[index], amount);
|
||||
Ok(index)
@ -732,7 +966,7 @@ impl BeaconState {
&mut self,
validator_index: usize,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
) -> Result<(), Error> {
self.exit_validator(validator_index, spec);
let current_epoch = self.current_epoch(spec);

@ -901,27 +1135,25 @@ impl BeaconState {
&self,
attestations: &[&PendingAttestation],
spec: &ChainSpec,
) -> Result<Vec<usize>, AttestationParticipantsError> {
let mut all_participants = attestations.iter().try_fold::<_, _, Result<
Vec<usize>,
AttestationParticipantsError,
>>(vec![], |mut acc, a| {
acc.append(&mut self.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
Ok(acc)
})?;
) -> Result<Vec<usize>, Error> {
let mut all_participants = attestations
.iter()
.try_fold::<_, _, Result<Vec<usize>, Error>>(vec![], |mut acc, a| {
acc.append(&mut self.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
Ok(acc)
})?;
all_participants.sort_unstable();
all_participants.dedup();
Ok(all_participants)
}
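
The `try_fold` above is just concatenating per-attestation participant lists and de-duplicating the result. The same shape in miniature, with plain index vectors:

// Illustrative: combine participant indices from several attestations.
fn union_of_participants(per_attestation: Vec<Vec<usize>>) -> Vec<usize> {
    // Concatenate, then sort + dedup, exactly as the method above does.
    let mut all: Vec<usize> = per_attestation.into_iter().flatten().collect();
    all.sort_unstable();
    all.dedup();
    all
}

fn main() {
    assert_eq!(union_of_participants(vec![vec![3, 1], vec![1, 7]]), vec![1, 3, 7]);
}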

/// Return the participant indices at for the ``attestation_data`` and ``bitfield``.
/// Returns the list of validator indices which participated in the attestation.
///
/// In effect, this converts the "committee indices" on the bitfield into "validator indices"
/// for self.validator_registry.
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
///
/// Spec v0.2.0
pub fn get_attestation_participants(
@ -929,26 +1161,26 @@ impl BeaconState {
attestation_data: &AttestationData,
bitfield: &Bitfield,
spec: &ChainSpec,
) -> Result<Vec<usize>, AttestationParticipantsError> {
let crosslink_committees =
self.get_crosslink_committees_at_slot(attestation_data.slot, false, spec)?;
) -> Result<Vec<usize>, Error> {
let epoch = attestation_data.slot.epoch(spec.epoch_length);
let relative_epoch = self.relative_epoch(epoch, spec)?;
let cache = self.cache(relative_epoch)?;

let committee_index: usize = crosslink_committees
.iter()
.position(|(_committee, shard)| *shard == attestation_data.shard)
.ok_or_else(|| AttestationParticipantsError::NoCommitteeForShard)?;
let (crosslink_committee, _shard) = &crosslink_committees[committee_index];
let (committee_slot_index, committee_index) = cache
.shard_committee_index_map
.get(&attestation_data.shard)
.ok_or_else(|| Error::ShardOutOfBounds)?;
let (committee, shard) = &cache.committees[*committee_slot_index][*committee_index];

/*
* TODO: verify bitfield length is valid.
*/
assert_eq!(*shard, attestation_data.shard, "Bad epoch cache build.");

let mut participants = vec![];
for (i, validator_index) in crosslink_committee.iter().enumerate() {
for (i, validator_index) in committee.iter().enumerate() {
if bitfield.get(i).unwrap() {
participants.push(*validator_index);
}
}

Ok(participants)
}
}
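
Concretely, the new code path is: shard to (slot offset, committee offset) via the cache, take that committee, keep the validators whose bit is set. A toy version with a plain `Vec<bool>` in place of `Bitfield`:

// Illustrative: `committee` holds validator indices; `bits[i]` marks whether
// the i-th committee member participated.
fn participants(committee: &[usize], bits: &[bool]) -> Vec<usize> {
    committee
        .iter()
        .zip(bits)
        .filter_map(|(validator, set)| if *set { Some(*validator) } else { None })
        .collect()
}

fn main() {
    assert_eq!(participants(&[10, 20, 30], &[true, false, true]), vec![10, 30]);
}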
@ -957,15 +1189,102 @@ fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
Hash256::from(&input.hash_tree_root()[..])
}

impl From<BeaconStateError> for AttestationParticipantsError {
fn from(e: BeaconStateError) -> AttestationParticipantsError {
AttestationParticipantsError::BeaconStateError(e)
impl From<Error> for InclusionError {
fn from(e: Error) -> InclusionError {
InclusionError::Error(e)
}
}

impl From<AttestationParticipantsError> for InclusionError {
fn from(e: AttestationParticipantsError) -> InclusionError {
InclusionError::AttestationParticipantsError(e)
impl Encodable for BeaconState {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.genesis_time);
s.append(&self.fork);
s.append(&self.validator_registry);
s.append(&self.validator_balances);
s.append(&self.validator_registry_update_epoch);
s.append(&self.latest_randao_mixes);
s.append(&self.previous_epoch_start_shard);
s.append(&self.current_epoch_start_shard);
s.append(&self.previous_calculation_epoch);
s.append(&self.current_calculation_epoch);
s.append(&self.previous_epoch_seed);
s.append(&self.current_epoch_seed);
s.append(&self.previous_justified_epoch);
s.append(&self.justified_epoch);
s.append(&self.justification_bitfield);
s.append(&self.finalized_epoch);
s.append(&self.latest_crosslinks);
s.append(&self.latest_block_roots);
s.append(&self.latest_index_roots);
s.append(&self.latest_penalized_balances);
s.append(&self.latest_attestations);
s.append(&self.batched_block_roots);
s.append(&self.latest_eth1_data);
s.append(&self.eth1_data_votes);
}
}

impl Decodable for BeaconState {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = <_>::ssz_decode(bytes, i)?;
let (genesis_time, i) = <_>::ssz_decode(bytes, i)?;
let (fork, i) = <_>::ssz_decode(bytes, i)?;
let (validator_registry, i) = <_>::ssz_decode(bytes, i)?;
let (validator_balances, i) = <_>::ssz_decode(bytes, i)?;
let (validator_registry_update_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (latest_randao_mixes, i) = <_>::ssz_decode(bytes, i)?;
let (previous_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
let (current_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
let (previous_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (current_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (previous_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
let (current_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
let (previous_justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (justification_bitfield, i) = <_>::ssz_decode(bytes, i)?;
let (finalized_epoch, i) = <_>::ssz_decode(bytes, i)?;
let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?;
let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_index_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_penalized_balances, i) = <_>::ssz_decode(bytes, i)?;
let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?;
let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?;
let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?;
let (eth1_data_votes, i) = <_>::ssz_decode(bytes, i)?;

Ok((
Self {
slot,
genesis_time,
fork,
validator_registry,
validator_balances,
validator_registry_update_epoch,
latest_randao_mixes,
previous_epoch_start_shard,
current_epoch_start_shard,
previous_calculation_epoch,
current_calculation_epoch,
previous_epoch_seed,
current_epoch_seed,
previous_justified_epoch,
justified_epoch,
justification_bitfield,
finalized_epoch,
latest_crosslinks,
latest_block_roots,
latest_index_roots,
latest_penalized_balances,
latest_attestations,
batched_block_roots,
latest_eth1_data,
eth1_data_votes,
cache_index_offset: 0,
caches: vec![EpochCache::empty(); CACHED_EPOCHS],
},
i,
))
}
}

@ -1004,3 +1323,37 @@ impl TreeHash for BeaconState {
hash(&result)
}
}

impl<T: RngCore> TestRandom<T> for BeaconState {
fn random_for_test(rng: &mut T) -> Self {
Self {
slot: <_>::random_for_test(rng),
genesis_time: <_>::random_for_test(rng),
fork: <_>::random_for_test(rng),
validator_registry: <_>::random_for_test(rng),
validator_balances: <_>::random_for_test(rng),
validator_registry_update_epoch: <_>::random_for_test(rng),
latest_randao_mixes: <_>::random_for_test(rng),
previous_epoch_start_shard: <_>::random_for_test(rng),
current_epoch_start_shard: <_>::random_for_test(rng),
previous_calculation_epoch: <_>::random_for_test(rng),
current_calculation_epoch: <_>::random_for_test(rng),
previous_epoch_seed: <_>::random_for_test(rng),
current_epoch_seed: <_>::random_for_test(rng),
previous_justified_epoch: <_>::random_for_test(rng),
justified_epoch: <_>::random_for_test(rng),
justification_bitfield: <_>::random_for_test(rng),
finalized_epoch: <_>::random_for_test(rng),
latest_crosslinks: <_>::random_for_test(rng),
latest_block_roots: <_>::random_for_test(rng),
latest_index_roots: <_>::random_for_test(rng),
latest_penalized_balances: <_>::random_for_test(rng),
latest_attestations: <_>::random_for_test(rng),
batched_block_roots: <_>::random_for_test(rng),
latest_eth1_data: <_>::random_for_test(rng),
eth1_data_votes: <_>::random_for_test(rng),
cache_index_offset: 0,
caches: vec![EpochCache::empty(); CACHED_EPOCHS],
}
}
}
eth2/types/src/beacon_state/epoch_cache.rs (new file, 84 lines)
@ -0,0 +1,84 @@
use super::{AttestationDutyMap, BeaconState, CrosslinkCommittees, Error, ShardCommitteeIndexMap};
use crate::{ChainSpec, Epoch};
use log::trace;
use serde_derive::Serialize;
use std::collections::HashMap;

#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct EpochCache {
/// True if this cache has been initialized.
pub initialized: bool,
/// The crosslink committees for an epoch.
pub committees: Vec<CrosslinkCommittees>,
/// Maps validator index to a slot, shard and committee index for attestation.
pub attestation_duty_map: AttestationDutyMap,
/// Maps a shard to an index of `self.committees`.
pub shard_committee_index_map: ShardCommitteeIndexMap,
}

impl EpochCache {
pub fn empty() -> EpochCache {
EpochCache {
initialized: false,
committees: vec![],
attestation_duty_map: AttestationDutyMap::new(),
shard_committee_index_map: ShardCommitteeIndexMap::new(),
}
}

pub fn initialized(
state: &BeaconState,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<EpochCache, Error> {
let mut epoch_committees: Vec<CrosslinkCommittees> =
Vec::with_capacity(spec.epoch_length as usize);
let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new();

let shuffling =
state.get_shuffling_for_slot(epoch.start_slot(spec.epoch_length), false, spec)?;

for (epoch_committeess_index, slot) in epoch.slot_iter(spec.epoch_length).enumerate() {
let slot_committees = state.calculate_crosslink_committees_at_slot(
slot,
false,
shuffling.clone(),
spec,
)?;

for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() {
// Empty committees are not permitted.
if committee.is_empty() {
return Err(Error::InsufficientValidators);
}

trace!(
"shard: {}, epoch_i: {}, slot_i: {}",
shard,
epoch_committeess_index,
slot_committees_index
);

shard_committee_index_map
.insert(*shard, (epoch_committeess_index, slot_committees_index));

for (committee_index, validator_index) in committee.iter().enumerate() {
attestation_duty_map.insert(
*validator_index as u64,
(slot, *shard, committee_index as u64),
);
}
}

epoch_committees.push(slot_committees)
}

Ok(EpochCache {
initialized: true,
committees: epoch_committees,
attestation_duty_map,
shard_committee_index_map,
})
}
}
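
The cache trades one shuffle per epoch for constant-time duty lookups afterwards. A toy illustration of the two lookup tables it builds (plain `HashMap`s standing in for the `AttestationDutyMap` and `ShardCommitteeIndexMap` aliases):

use std::collections::HashMap;

fn main() {
    // validator index -> (slot, shard, committee index)
    let mut attestation_duties: HashMap<u64, (u64, u64, u64)> = HashMap::new();
    // shard -> (slot offset within the epoch, committee offset within the slot)
    let mut shard_committees: HashMap<u64, (usize, usize)> = HashMap::new();

    // Suppose validator 3 sits at committee position 0 for shard 5 at slot 8.
    attestation_duties.insert(3, (8, 5, 0));
    shard_committees.insert(5, (0, 0));

    // Later queries are single hash lookups rather than re-shuffles.
    assert_eq!(attestation_duties.get(&3), Some(&(8, 5, 0)));
    assert_eq!(shard_committees.get(&5), Some(&(0, 0)));
}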
@ -3,8 +3,8 @@
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use crate::{
beacon_state::BeaconStateError, BeaconState, ChainSpec, Deposit, DepositData, DepositInput,
Eth1Data, Hash256, Keypair,
BeaconState, BeaconStateError, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data,
Hash256, Keypair,
};
use bls::create_proof_of_possession;
use ssz::{ssz_encode, Decodable};
@ -73,6 +73,53 @@ pub fn can_produce_genesis_block() {
builder.build().unwrap();
}

/// Tests that `get_attestation_participants` is consistent with the result of
/// `get_crosslink_committees_at_slot` with a full bitfield.
#[test]
pub fn get_attestation_participants_consistency() {
let mut rng = XorShiftRng::from_seed([42; 16]);

let mut builder = BeaconStateTestBuilder::with_random_validators(8);
builder.spec = ChainSpec::few_validators();

let mut state = builder.build().unwrap();
let spec = builder.spec.clone();

state
.build_epoch_cache(RelativeEpoch::Previous, &spec)
.unwrap();
state
.build_epoch_cache(RelativeEpoch::Current, &spec)
.unwrap();
state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap();

for slot in state
.slot
.epoch(spec.epoch_length)
.slot_iter(spec.epoch_length)
{
let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap();

for (committee, shard) in committees {
let mut attestation_data = AttestationData::random_for_test(&mut rng);
attestation_data.slot = slot;
attestation_data.shard = *shard;

let mut bitfield = Bitfield::new();
for (i, _) in committee.iter().enumerate() {
bitfield.set(i, true);
}

assert_eq!(
state
.get_attestation_participants(&attestation_data, &bitfield, &spec)
.unwrap(),
*committee
);
}
}
}

#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
@ -199,7 +199,7 @@ impl ChainSpec {
let genesis_epoch = genesis_slot.epoch(epoch_length);

Self {
shard_count: 1,
shard_count: 8,
target_committee_size: 1,
genesis_slot,
genesis_epoch,
@ -12,6 +12,22 @@ pub struct Fork {
pub epoch: Epoch,
}

impl Fork {
/// Return the fork version of the given ``epoch``.
pub fn get_fork_version(&self, epoch: Epoch) -> u64 {
if epoch < self.epoch {
return self.previous_version;
}
self.current_version
}

/// Get the domain number that represents the fork meta and signature domain.
pub fn get_domain(&self, epoch: Epoch, domain_type: u64) -> u64 {
let fork_version = self.get_fork_version(epoch);
fork_version * u64::pow(2, 32) + domain_type
}
}
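
The packing in `get_domain` is just the fork version in the high 32 bits and the domain type in the low 32. A quick worked check with assumed values (the actual constants are not shown in this diff):

fn get_domain(fork_version: u64, domain_type: u64) -> u64 {
    fork_version * u64::pow(2, 32) + domain_type
}

fn main() {
    assert_eq!(get_domain(0, 3), 3); // before the fork boundary: domain is just the type
    assert_eq!(get_domain(1, 3), (1u64 << 32) + 3); // after the fork: version shifts into the high bits
}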

impl TreeHash for Fork {
fn hash_tree_root_internal(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![];
@ -42,7 +42,9 @@ pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit;
pub use crate::attester_slashing::AttesterSlashing;
pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_state::BeaconState;
pub use crate::beacon_state::{
BeaconState, Error as BeaconStateError, InclusionError, RelativeEpoch,
};
pub use crate::casper_slashing::CasperSlashing;
pub use crate::chain_spec::ChainSpec;
pub use crate::crosslink::Crosslink;
@ -72,7 +72,7 @@ impl Epoch {

pub fn slot_iter(&self, epoch_length: u64) -> SlotIter {
SlotIter {
current: self.start_slot(epoch_length),
current_iteration: 0,
epoch: self,
epoch_length,
}
@ -80,7 +80,7 @@ impl Epoch {
}

pub struct SlotIter<'a> {
current: Slot,
current_iteration: u64,
epoch: &'a Epoch,
epoch_length: u64,
}
@ -89,12 +89,13 @@ impl<'a> Iterator for SlotIter<'a> {
type Item = Slot;

fn next(&mut self) -> Option<Slot> {
if self.current == self.epoch.end_slot(self.epoch_length) {
if self.current_iteration >= self.epoch_length {
None
} else {
let previous = self.current;
self.current += 1;
Some(previous)
let start_slot = self.epoch.start_slot(self.epoch_length);
let previous = self.current_iteration;
self.current_iteration += 1;
Some(start_slot + previous)
}
}
}
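
This rewrite fixes an off-by-one: the old iterator stopped as soon as `current` reached `end_slot` (the last slot of the epoch) and therefore never yielded it, whereas counting iterations yields exactly `epoch_length` slots, as the new `slot_iter` test below asserts. A stripped-down standalone version of the fixed iterator (plain `u64` slots instead of the `Slot` newtype):

struct SlotIter {
    start_slot: u64,
    current_iteration: u64,
    epoch_length: u64,
}

impl Iterator for SlotIter {
    type Item = u64;

    fn next(&mut self) -> Option<u64> {
        if self.current_iteration >= self.epoch_length {
            None
        } else {
            let previous = self.current_iteration;
            self.current_iteration += 1;
            Some(self.start_slot + previous)
        }
    }
}

fn main() {
    let iter = SlotIter { start_slot: 0, current_iteration: 0, epoch_length: 8 };
    // All eight slots of the epoch, including the final one the old code dropped.
    assert_eq!(iter.collect::<Vec<_>>(), vec![0, 1, 2, 3, 4, 5, 6, 7]);
}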
@ -115,4 +116,22 @@ mod epoch_tests {
use ssz::ssz_encode;

all_tests!(Epoch);

#[test]
fn slot_iter() {
let epoch_length = 8;

let epoch = Epoch::new(0);

let mut slots = vec![];
for slot in epoch.slot_iter(epoch_length) {
slots.push(slot);
}

assert_eq!(slots.len(), epoch_length as usize);

for i in 0..epoch_length {
assert_eq!(Slot::from(i), slots[i as usize])
}
}
}
@ -8,6 +8,6 @@ impl<T: RngCore> TestRandom<T> for Signature {
let mut message = vec![0; 32];
rng.fill_bytes(&mut message);

Signature::new(&message, &secret_key)
Signature::new(&message, 0, &secret_key)
}
}
@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[dependencies]
bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "v0.3.0" }
bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.5.2" }
hashing = { path = "../hashing" }
hex = "0.3"
serde = "1.0"
@ -27,8 +13,13 @@ impl AggregateSignature {
///
/// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys
/// that signed the `AggregateSignature`.
pub fn verify(&self, msg: &[u8], aggregate_public_key: &AggregatePublicKey) -> bool {
self.0.verify(msg, aggregate_public_key)
pub fn verify(
&self,
msg: &[u8],
domain: u64,
aggregate_public_key: &AggregatePublicKey,
) -> bool {
self.0.verify(msg, domain, aggregate_public_key)
}
}

@ -73,7 +78,7 @@ mod tests {
let keypair = Keypair::random();

let mut original = AggregateSignature::new();
original.add(&Signature::new(&[42, 42], &keypair.sk));
original.add(&Signature::new(&[42, 42], 0, &keypair.sk));

let bytes = ssz_encode(&original);
let (decoded, _) = AggregateSignature::ssz_decode(&bytes, 0).unwrap();
@ -1,5 +1,4 @@
extern crate bls_aggregates;
extern crate hashing;
extern crate ssz;

mod aggregate_signature;
@ -16,37 +15,29 @@ pub use crate::signature::Signature;

pub use self::bls_aggregates::AggregatePublicKey;

pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96;

use hashing::hash;
use ssz::ssz_encode;
use std::default::Default;

fn extend_if_needed(hash: &mut Vec<u8>) {
// NOTE: bls_aggregates crate demands 48 bytes, this may be removed as we get closer to production
hash.resize(48, Default::default())
}

/// For some signature and public key, ensure that the signature message was the public key and it
/// was signed by the secret key that corresponds to that public key.
pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool {
let mut hash = hash(&ssz_encode(pubkey));
extend_if_needed(&mut hash);
sig.verify_hashed(&hash, &pubkey)
// TODO: replace this function with state.validate_proof_of_possession
// https://github.com/sigp/lighthouse/issues/239
sig.verify(&ssz_encode(pubkey), 0, &pubkey)
}

// TODO: Update this method
// https://github.com/sigp/lighthouse/issues/239
pub fn create_proof_of_possession(keypair: &Keypair) -> Signature {
let mut hash = hash(&ssz_encode(&keypair.pk));
extend_if_needed(&mut hash);
Signature::new_hashed(&hash, &keypair.sk)
Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk)
}
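
With the 0.5.2 `signature-schemes` API every sign/verify call now threads a `domain` argument, and the proof-of-possession helpers above sign `ssz_encode(pubkey)` under domain 0. A hedged usage sketch, assuming `Keypair` is re-exported from the `bls` crate alongside these helpers:

use bls::{create_proof_of_possession, verify_proof_of_possession, Keypair};

fn main() {
    let keypair = Keypair::random();
    // Sign ssz_encode(pk) under domain 0, then check it against the same public key.
    let proof = create_proof_of_possession(&keypair);
    assert!(verify_proof_of_possession(&proof, &keypair.pk));
}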

pub fn bls_verify_aggregate(
pubkey: &AggregatePublicKey,
message: &[u8],
signature: &AggregateSignature,
_domain: u64,
domain: u64,
) -> bool {
// TODO: add domain
signature.verify(message, pubkey)
signature.verify(message, domain, pubkey)
}
@ -14,24 +14,34 @@ pub struct Signature(RawSignature);

impl Signature {
/// Instantiate a new Signature from a message and a SecretKey.
pub fn new(msg: &[u8], sk: &SecretKey) -> Self {
Signature(RawSignature::new(msg, sk.as_raw()))
pub fn new(msg: &[u8], domain: u64, sk: &SecretKey) -> Self {
Signature(RawSignature::new(msg, domain, sk.as_raw()))
}

/// Instantiate a new Signature from a message and a SecretKey, where the message has already
/// been hashed.
pub fn new_hashed(msg_hashed: &[u8], sk: &SecretKey) -> Self {
Signature(RawSignature::new_hashed(msg_hashed, sk.as_raw()))
pub fn new_hashed(x_real_hashed: &[u8], x_imaginary_hashed: &[u8], sk: &SecretKey) -> Self {
Signature(RawSignature::new_hashed(
x_real_hashed,
x_imaginary_hashed,
sk.as_raw(),
))
}

/// Verify the Signature against a PublicKey.
pub fn verify(&self, msg: &[u8], pk: &PublicKey) -> bool {
self.0.verify(msg, pk.as_raw())
pub fn verify(&self, msg: &[u8], domain: u64, pk: &PublicKey) -> bool {
self.0.verify(msg, domain, pk.as_raw())
}

/// Verify the Signature against a PublicKey, where the message has already been hashed.
pub fn verify_hashed(&self, msg_hash: &[u8], pk: &PublicKey) -> bool {
self.0.verify_hashed(msg_hash, pk.as_raw())
pub fn verify_hashed(
&self,
x_real_hashed: &[u8],
x_imaginary_hashed: &[u8],
pk: &PublicKey,
) -> bool {
self.0
.verify_hashed(x_real_hashed, x_imaginary_hashed, pk.as_raw())
}

/// Returns the underlying signature.
@ -41,7 +51,9 @@ impl Signature {

/// Returns a new empty signature.
pub fn empty_signature() -> Self {
let empty: Vec<u8> = vec![0; 97];
let mut empty: Vec<u8> = vec![0; 96];
// TODO: Modify the way flags are used (b_flag should not be used for empty_signature in the future)
empty[0] += u8::pow(2, 6);
Signature(RawSignature::from_bytes(&empty).unwrap())
}
}
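
For reference, the expected wire form of the new `empty_signature` is 96 bytes of zeros with only the b-flag bit (2^6) set in the first byte, which is exactly what the updated test below checks. A tiny self-contained illustration of that layout:

fn main() {
    let mut empty = vec![0u8; 96];
    empty[0] += u8::pow(2, 6); // 0b0100_0000
    assert_eq!(empty.len(), 96);
    assert_eq!(empty[0], 64);
    assert!(empty[1..].iter().all(|b| *b == 0));
}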
@ -85,7 +97,7 @@ mod tests {
pub fn test_ssz_round_trip() {
let keypair = Keypair::random();

let original = Signature::new(&[42, 42], &keypair.sk);
let original = Signature::new(&[42, 42], 0, &keypair.sk);

let bytes = ssz_encode(&original);
let (decoded, _) = Signature::ssz_decode(&bytes, 0).unwrap();
@ -99,9 +111,13 @@ mod tests {

let sig_as_bytes: Vec<u8> = sig.as_raw().as_bytes();

assert_eq!(sig_as_bytes.len(), 97);
for one_byte in sig_as_bytes.iter() {
assert_eq!(*one_byte, 0);
assert_eq!(sig_as_bytes.len(), 96);
for (i, one_byte) in sig_as_bytes.iter().enumerate() {
if i == 0 {
assert_eq!(*one_byte, u8::pow(2, 6));
} else {
assert_eq!(*one_byte, 0);
}
}
}
}