Merge branch 'master' into 215-migrate-ssz-little-endian
commit 2c95d531ea
@@ -9,12 +9,14 @@ members = [
     "eth2/utils/boolean-bitfield",
     "eth2/utils/hashing",
     "eth2/utils/honey-badger-split",
+    "eth2/utils/merkle_proof",
     "eth2/utils/int_to_bytes",
     "eth2/utils/slot_clock",
     "eth2/utils/ssz",
     "eth2/utils/ssz_derive",
     "eth2/utils/swap_or_not_shuffle",
     "eth2/utils/fisher_yates_shuffle",
+    "eth2/utils/test_random_derive",
     "beacon_node",
     "beacon_node/db",
     "beacon_node/beacon_chain",
README.md (25 lines changed)
@@ -29,6 +29,19 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update
 \#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
 [company website](https://sigmaprime.io).
 
+### Directory Structure
+
+- [`beacon_node/`](beacon_node/): the "Beacon Node" binary and crates exclusively
+  associated with it.
+- [`docs/`](docs/): documentation related to the repository. This includes contributor
+  guides, etc. (It does not include code documentation, which can be produced with `cargo doc`).
+- [`eth2/`](eth2/): Crates containing common logic across the Lighthouse project. For
+  example: Ethereum 2.0 types ([`BeaconBlock`](eth2/types/src/beacon_block.rs), [`BeaconState`](eth2/types/src/beacon_state.rs), etc) and
+  SimpleSerialize (SSZ).
+- [`protos/`](protos/): protobuf/gRPC definitions that are common across the Lighthouse project.
+- [`validator_client/`](validator_client/): the "Validator Client" binary and crates exclusively
+  associated with it.
+
 ### Components
 
 The following list describes some of the components actively under development
@@ -61,7 +74,7 @@ by the team:
 from the Ethereum Foundation to develop *simpleserialize* (SSZ), a
 purpose-built serialization format for sending information across a network.
 Check out the [SSZ
-implementation](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz)
+implementation](https://github.com/ethereum/eth2.0-specs/blob/00aa553fee95963b74fbec84dbd274d7247b8a0e/specs/simple-serialize.md)
 and this
 [research](https://github.com/sigp/serialization_sandbox/blob/report/report/serialization_report.md)
 on serialization formats for more information.
@@ -79,16 +92,6 @@ In addition to these components we are also working on database schemas, RPC
 frameworks, specification development, database optimizations (e.g.,
 bloom-filters), and tons of other interesting stuff (at least we think so).
 
-### Directory Structure
-
-Here we provide an overview of the directory structure:
-
-- `beacon_chain/`: contains logic derived directly from the specification.
-  E.g., shuffling algorithms, state transition logic and structs, block
-  validation, BLS crypto, etc.
-- `lighthouse/`: contains logic specific to this client implementation. E.g.,
-  CLI parsing, RPC end-points, databases, etc.
-
 ### Running
 
 **NOTE: The cryptography libraries used in this implementation are
@@ -1,10 +1,8 @@
-use crate::cached_beacon_state::CachedBeaconState;
-use state_processing::validate_attestation_without_signature;
+use log::trace;
+use ssz::TreeHash;
+use state_processing::per_block_processing::validate_attestation_without_signature;
 use std::collections::{HashMap, HashSet};
-use types::{
-    beacon_state::BeaconStateError, AggregateSignature, Attestation, AttestationData, BeaconState,
-    Bitfield, ChainSpec, FreeAttestation, Signature,
-};
+use types::*;
 
 const PHASE_0_CUSTODY_BIT: bool = false;
 
@@ -42,21 +40,28 @@ pub enum Message {
     BadSignature,
     /// The given `slot` does not match the validators committee assignment.
     BadSlot,
-    /// The given `shard` does not match the validators committee assignment.
+    /// The given `shard` does not match the validators committee assignment, or is not included in
+    /// a committee for the given slot.
     BadShard,
+    /// Attestation is from the epoch prior to this, ignoring.
+    TooOld,
 }
 
-macro_rules! some_or_invalid {
-    ($expression: expr, $error: expr) => {
-        match $expression {
-            Some(x) => x,
-            None => {
-                return Ok(Outcome {
-                    valid: false,
-                    message: $error,
-                });
-            }
-        }
+macro_rules! valid_outcome {
+    ($error: expr) => {
+        return Ok(Outcome {
+            valid: true,
+            message: $error,
+        });
+    };
+}
+
+macro_rules! invalid_outcome {
+    ($error: expr) => {
+        return Ok(Outcome {
+            valid: false,
+            message: $error,
+        });
     };
 }
 
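For readers skimming the diff: the replaced `some_or_invalid!` macro unwrapped an `Option` and early-returned an invalid `Outcome` on `None`, while the new `valid_outcome!`/`invalid_outcome!` pair simply early-return a valid or invalid `Outcome` from the enclosing function. Below is a minimal, self-contained sketch of how the new macros read at a call site; the `Outcome` and `Message` types are simplified stand-ins for the crate's own definitions, and `check_slot` is a hypothetical caller.

```rust
#[derive(Debug, PartialEq)]
pub enum Message {
    BadSlot,
    Aggregated,
}

#[derive(Debug, PartialEq)]
pub struct Outcome {
    pub valid: bool,
    pub message: Message,
}

// Early-return a successful `Outcome` from the enclosing function.
macro_rules! valid_outcome {
    ($error: expr) => {
        return Ok(Outcome {
            valid: true,
            message: $error,
        });
    };
}

// Early-return a failed `Outcome` from the enclosing function.
macro_rules! invalid_outcome {
    ($error: expr) => {
        return Ok(Outcome {
            valid: false,
            message: $error,
        });
    };
}

fn check_slot(expected: u64, actual: u64) -> Result<Outcome, ()> {
    if expected != actual {
        invalid_outcome!(Message::BadSlot);
    }
    valid_outcome!(Message::Aggregated);
}

fn main() {
    let outcome = check_slot(1, 2).unwrap();
    assert!(!outcome.valid);
    assert_eq!(outcome.message, Message::BadSlot);
}
```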
@@ -77,49 +82,61 @@ impl AttestationAggregator {
     /// - The signature is verified against that of the validator at `validator_index`.
     pub fn process_free_attestation(
         &mut self,
-        cached_state: &CachedBeaconState,
+        state: &BeaconState,
         free_attestation: &FreeAttestation,
         spec: &ChainSpec,
     ) -> Result<Outcome, BeaconStateError> {
-        let (slot, shard, committee_index) = some_or_invalid!(
-            cached_state.attestation_slot_and_shard_for_validator(
-                free_attestation.validator_index as usize,
-                spec,
-            )?,
-            Message::BadValidatorIndex
+        let attestation_duties = match state.attestation_slot_and_shard_for_validator(
+            free_attestation.validator_index as usize,
+            spec,
+        ) {
+            Err(BeaconStateError::EpochCacheUninitialized(e)) => {
+                panic!("Attempted to access unbuilt cache {:?}.", e)
+            }
+            Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld),
+            Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard),
+            Err(e) => return Err(e),
+            Ok(None) => invalid_outcome!(Message::BadValidatorIndex),
+            Ok(Some(attestation_duties)) => attestation_duties,
+        };
+
+        let (slot, shard, committee_index) = attestation_duties;
+
+        trace!(
+            "slot: {}, shard: {}, committee_index: {}, val_index: {}",
+            slot,
+            shard,
+            committee_index,
+            free_attestation.validator_index
         );
 
         if free_attestation.data.slot != slot {
-            return Ok(Outcome {
-                valid: false,
-                message: Message::BadSlot,
-            });
+            invalid_outcome!(Message::BadSlot);
         }
         if free_attestation.data.shard != shard {
-            return Ok(Outcome {
-                valid: false,
-                message: Message::BadShard,
-            });
+            invalid_outcome!(Message::BadShard);
         }
 
-        let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);
+        let signable_message = AttestationDataAndCustodyBit {
+            data: free_attestation.data.clone(),
+            custody_bit: PHASE_0_CUSTODY_BIT,
+        }
+        .hash_tree_root();
 
-        let validator_record = some_or_invalid!(
-            cached_state
-                .state
-                .validator_registry
-                .get(free_attestation.validator_index as usize),
-            Message::BadValidatorIndex
-        );
-
-        if !free_attestation
-            .signature
-            .verify(&signable_message, &validator_record.pubkey)
+        let validator_record = match state
+            .validator_registry
+            .get(free_attestation.validator_index as usize)
         {
-            return Ok(Outcome {
-                valid: false,
-                message: Message::BadSignature,
-            });
+            None => invalid_outcome!(Message::BadValidatorIndex),
+            Some(validator_record) => validator_record,
+        };
+
+        if !free_attestation.signature.verify(
+            &signable_message,
+            spec.get_domain(state.current_epoch(spec), Domain::Attestation, &state.fork),
+            &validator_record.pubkey,
+        ) {
+            invalid_outcome!(Message::BadSignature);
         }
 
         if let Some(existing_attestation) = self.store.get(&signable_message) {
@@ -129,15 +146,9 @@ impl AttestationAggregator {
                 committee_index as usize,
             ) {
                 self.store.insert(signable_message, updated_attestation);
-                Ok(Outcome {
-                    valid: true,
-                    message: Message::Aggregated,
-                })
+                valid_outcome!(Message::Aggregated);
             } else {
-                Ok(Outcome {
-                    valid: true,
-                    message: Message::AggregationNotRequired,
-                })
+                valid_outcome!(Message::AggregationNotRequired);
             }
         } else {
             let mut aggregate_signature = AggregateSignature::new();
@@ -151,10 +162,7 @@ impl AttestationAggregator {
                 aggregate_signature,
             };
             self.store.insert(signable_message, new_attestation);
-            Ok(Outcome {
-                valid: true,
-                message: Message::NewAttestationCreated,
-            })
+            valid_outcome!(Message::NewAttestationCreated);
         }
     }
 
@@ -1,6 +1,6 @@
 use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
-use crate::cached_beacon_state::CachedBeaconState;
 use crate::checkpoint::CheckPoint;
+use crate::errors::{BeaconChainError as Error, BlockProductionError};
 use db::{
     stores::{BeaconBlockStore, BeaconStateStore},
     ClientDB, DBError,
@@ -11,28 +11,15 @@ use parking_lot::{RwLock, RwLockReadGuard};
 use slot_clock::SlotClock;
 use ssz::ssz_encode;
 use state_processing::{
-    BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError,
+    per_block_processing, per_block_processing_without_verifying_block_signature,
+    per_slot_processing, BlockProcessingError, SlotProcessingError,
 };
 use std::sync::Arc;
 use types::{
-    beacon_state::BeaconStateError,
     readers::{BeaconBlockReader, BeaconStateReader},
-    AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Crosslink, Deposit,
-    Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, Signature, Slot,
+    *,
 };
 
-#[derive(Debug, PartialEq)]
-pub enum Error {
-    InsufficientValidators,
-    BadRecentBlockRoots,
-    BeaconStateError(BeaconStateError),
-    DBInconsistent(String),
-    DBError(String),
-    ForkChoiceError(ForkChoiceError),
-    MissingBeaconBlock(Hash256),
-    MissingBeaconState(Hash256),
-}
-
 #[derive(Debug, PartialEq)]
 pub enum ValidBlock {
     /// The block was successfully processed.
@@ -67,10 +54,14 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
     pub state_store: Arc<BeaconStateStore<T>>,
     pub slot_clock: U,
     pub attestation_aggregator: RwLock<AttestationAggregator>,
+    pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
+    pub exits_for_inclusion: RwLock<Vec<VoluntaryExit>>,
+    pub transfers_for_inclusion: RwLock<Vec<Transfer>>,
+    pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
+    pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
     canonical_head: RwLock<CheckPoint>,
     finalized_head: RwLock<CheckPoint>,
     pub state: RwLock<BeaconState>,
-    pub cached_state: RwLock<CachedBeaconState>,
     pub spec: ChainSpec,
     pub fork_choice: RwLock<F>,
 }
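The five new `*_for_inclusion` fields added to `BeaconChain` above all follow the same shape: a `parking_lot::RwLock<Vec<T>>` that callers push pending operations into and that block production later reads out. A minimal sketch of that shape in isolation, assuming only the `parking_lot` crate (already a dependency in this repository); the `Deposit` struct below is a hypothetical stand-in for the real `types::Deposit`:

```rust
use parking_lot::RwLock;

// Hypothetical stand-in for an operation queued for inclusion in a block.
#[derive(Clone, Debug, PartialEq)]
struct Deposit {
    amount: u64,
}

struct InclusionQueues {
    deposits_for_inclusion: RwLock<Vec<Deposit>>,
}

impl InclusionQueues {
    /// Queue a deposit for inclusion in a future block (interior mutability,
    /// so only `&self` is required).
    fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
        self.deposits_for_inclusion.write().push(deposit);
    }

    /// Return a copy of everything currently queued.
    fn get_deposits_for_block(&self) -> Vec<Deposit> {
        self.deposits_for_inclusion.read().clone()
    }
}

fn main() {
    let queues = InclusionQueues {
        deposits_for_inclusion: RwLock::new(vec![]),
    };
    queues.receive_deposit_for_inclusion(Deposit { amount: 32 });
    assert_eq!(queues.get_deposits_for_block().len(), 1);
}
```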
@@ -82,6 +73,7 @@ where
     F: ForkChoice,
 {
     /// Instantiate a new Beacon Chain, from genesis.
+    #[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks.
     pub fn genesis(
         state_store: Arc<BeaconStateStore<T>>,
         block_store: Arc<BeaconBlockStore<T>>,
@@ -96,7 +88,7 @@ where
             return Err(Error::InsufficientValidators);
         }
 
-        let genesis_state = BeaconState::genesis(
+        let mut genesis_state = BeaconState::genesis(
             genesis_time,
             initial_validator_deposits,
             latest_eth1_data,
@@ -109,32 +101,37 @@ where
         let block_root = genesis_block.canonical_root();
         block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;
 
-        let cached_state = RwLock::new(CachedBeaconState::from_beacon_state(
-            genesis_state.clone(),
-            spec.clone(),
-        )?);
-
         let finalized_head = RwLock::new(CheckPoint::new(
             genesis_block.clone(),
             block_root,
+            // TODO: this is a memory waste; remove full clone.
             genesis_state.clone(),
             state_root,
         ));
         let canonical_head = RwLock::new(CheckPoint::new(
             genesis_block.clone(),
             block_root,
+            // TODO: this is a memory waste; remove full clone.
             genesis_state.clone(),
             state_root,
         ));
         let attestation_aggregator = RwLock::new(AttestationAggregator::new());
 
+        genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?;
+        genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?;
+        genesis_state.build_epoch_cache(RelativeEpoch::Next, &spec)?;
+
         Ok(Self {
             block_store,
             state_store,
             slot_clock,
             attestation_aggregator,
-            state: RwLock::new(genesis_state.clone()),
-            cached_state,
+            deposits_for_inclusion: RwLock::new(vec![]),
+            exits_for_inclusion: RwLock::new(vec![]),
+            transfers_for_inclusion: RwLock::new(vec![]),
+            proposer_slashings_for_inclusion: RwLock::new(vec![]),
+            attester_slashings_for_inclusion: RwLock::new(vec![]),
+            state: RwLock::new(genesis_state),
             finalized_head,
             canonical_head,
             spec,
@@ -150,6 +147,10 @@ where
         new_beacon_state: BeaconState,
         new_beacon_state_root: Hash256,
     ) {
+        debug!(
+            "Updating canonical head with block at slot: {}",
+            new_beacon_block.slot
+        );
         let mut head = self.canonical_head.write();
         head.update(
             new_beacon_block,
@@ -206,9 +207,7 @@ where
         let state_slot = self.state.read().slot;
         let head_block_root = self.head().beacon_block_root;
         for _ in state_slot.as_u64()..slot.as_u64() {
-            self.state
-                .write()
-                .per_slot_processing(head_block_root, &self.spec)?;
+            per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?;
         }
         Ok(())
     }
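A small Rust detail in the hunk above: `per_slot_processing` is now a free function taking `&mut BeaconState`, so the caller writes `&mut *self.state.write()` to turn the `RwLock` write guard into a plain mutable reference for the duration of the call. A tiny sketch of that deref pattern, using a trivial stand-in function rather than the real `per_slot_processing`:

```rust
use parking_lot::RwLock;

// Stand-in for a free function that mutates state in place.
fn bump_slot(slot: &mut u64) {
    *slot += 1;
}

fn main() {
    let state = RwLock::new(0_u64);

    // `state.write()` yields a guard; `&mut *guard` re-borrows the inner
    // value as `&mut u64`, which is what the free function expects. The
    // guard (and therefore the lock) is dropped at the end of the statement.
    bump_slot(&mut *state.write());

    assert_eq!(*state.read(), 1);
}
```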
@@ -288,7 +287,7 @@ where
             validator_index
         );
         if let Some((slot, shard, _committee)) = self
-            .cached_state
+            .state
             .read()
             .attestation_slot_and_shard_for_validator(validator_index, &self.spec)?
         {
@@ -306,7 +305,7 @@ where
             .state
             .read()
             .get_block_root(
-                justified_epoch.start_slot(self.spec.epoch_length),
+                justified_epoch.start_slot(self.spec.slots_per_epoch),
                 &self.spec,
             )
             .ok_or_else(|| Error::BadRecentBlockRoots)?;
@@ -325,10 +324,10 @@ where
             shard,
             beacon_block_root: self.head().beacon_block_root,
             epoch_boundary_root,
-            shard_block_root: Hash256::zero(),
+            crosslink_data_root: Hash256::zero(),
             latest_crosslink: Crosslink {
-                epoch: self.state.read().slot.epoch(self.spec.epoch_length),
-                shard_block_root: Hash256::zero(),
+                epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
+                crosslink_data_root: Hash256::zero(),
             },
             justified_epoch,
             justified_block_root,
@@ -346,7 +345,7 @@ where
         let aggregation_outcome = self
             .attestation_aggregator
             .write()
-            .process_free_attestation(&self.cached_state.read(), &free_attestation, &self.spec)?;
+            .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?;
 
         // return if the attestation is invalid
         if !aggregation_outcome.valid {
@@ -362,6 +361,222 @@ where
         Ok(aggregation_outcome)
     }
 
+    /// Accept some deposit and queue it for inclusion in an appropriate block.
+    pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
+        // TODO: deposits are not checked for validity; check them.
+        //
+        // https://github.com/sigp/lighthouse/issues/276
+        self.deposits_for_inclusion.write().push(deposit);
+    }
+
+    /// Return a vec of deposits suitable for inclusion in some block.
+    pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
+        // TODO: deposits are indiscriminately included; check them for validity.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        self.deposits_for_inclusion.read().clone()
+    }
+
+    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
+    /// inclusion queue.
+    ///
+    /// This ensures that `Deposits` are not included twice in successive blocks.
+    pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
+        // TODO: method does not take forks into account; consider this.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        let mut indices_to_delete = vec![];
+
+        for included in included_deposits {
+            for (i, for_inclusion) in self.deposits_for_inclusion.read().iter().enumerate() {
+                if included == for_inclusion {
+                    indices_to_delete.push(i);
+                }
+            }
+        }
+
+        let deposits_for_inclusion = &mut self.deposits_for_inclusion.write();
+        for i in indices_to_delete {
+            deposits_for_inclusion.remove(i);
+        }
+    }
+
+    /// Accept some exit and queue it for inclusion in an appropriate block.
+    pub fn receive_exit_for_inclusion(&self, exit: VoluntaryExit) {
+        // TODO: exits are not checked for validity; check them.
+        //
+        // https://github.com/sigp/lighthouse/issues/276
+        self.exits_for_inclusion.write().push(exit);
+    }
+
+    /// Return a vec of exits suitable for inclusion in some block.
+    pub fn get_exits_for_block(&self) -> Vec<VoluntaryExit> {
+        // TODO: exits are indiscriminately included; check them for validity.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        self.exits_for_inclusion.read().clone()
+    }
+
+    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
+    /// inclusion queue.
+    ///
+    /// This ensures that `Deposits` are not included twice in successive blocks.
+    pub fn set_exits_as_included(&self, included_exits: &[VoluntaryExit]) {
+        // TODO: method does not take forks into account; consider this.
+        let mut indices_to_delete = vec![];
+
+        for included in included_exits {
+            for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() {
+                if included == for_inclusion {
+                    indices_to_delete.push(i);
+                }
+            }
+        }
+
+        let exits_for_inclusion = &mut self.exits_for_inclusion.write();
+        for i in indices_to_delete {
+            exits_for_inclusion.remove(i);
+        }
+    }
+
+    /// Accept some transfer and queue it for inclusion in an appropriate block.
+    pub fn receive_transfer_for_inclusion(&self, transfer: Transfer) {
+        // TODO: transfers are not checked for validity; check them.
+        //
+        // https://github.com/sigp/lighthouse/issues/276
+        self.transfers_for_inclusion.write().push(transfer);
+    }
+
+    /// Return a vec of transfers suitable for inclusion in some block.
+    pub fn get_transfers_for_block(&self) -> Vec<Transfer> {
+        // TODO: transfers are indiscriminately included; check them for validity.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        self.transfers_for_inclusion.read().clone()
+    }
+
+    /// Takes a list of `Deposits` that were included in recent blocks and removes them from the
+    /// inclusion queue.
+    ///
+    /// This ensures that `Deposits` are not included twice in successive blocks.
+    pub fn set_transfers_as_included(&self, included_transfers: &[Transfer]) {
+        // TODO: method does not take forks into account; consider this.
+        let mut indices_to_delete = vec![];
+
+        for included in included_transfers {
+            for (i, for_inclusion) in self.transfers_for_inclusion.read().iter().enumerate() {
+                if included == for_inclusion {
+                    indices_to_delete.push(i);
+                }
+            }
+        }
+
+        let transfers_for_inclusion = &mut self.transfers_for_inclusion.write();
+        for i in indices_to_delete {
+            transfers_for_inclusion.remove(i);
+        }
+    }
+
+    /// Accept some proposer slashing and queue it for inclusion in an appropriate block.
+    pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
+        // TODO: proposer_slashings are not checked for validity; check them.
+        //
+        // https://github.com/sigp/lighthouse/issues/276
+        self.proposer_slashings_for_inclusion
+            .write()
+            .push(proposer_slashing);
+    }
+
+    /// Return a vec of proposer slashings suitable for inclusion in some block.
+    pub fn get_proposer_slashings_for_block(&self) -> Vec<ProposerSlashing> {
+        // TODO: proposer_slashings are indiscriminately included; check them for validity.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        self.proposer_slashings_for_inclusion.read().clone()
+    }
+
+    /// Takes a list of `ProposerSlashings` that were included in recent blocks and removes them
+    /// from the inclusion queue.
+    ///
+    /// This ensures that `ProposerSlashings` are not included twice in successive blocks.
+    pub fn set_proposer_slashings_as_included(
+        &self,
+        included_proposer_slashings: &[ProposerSlashing],
+    ) {
+        // TODO: method does not take forks into account; consider this.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        let mut indices_to_delete = vec![];
+
+        for included in included_proposer_slashings {
+            for (i, for_inclusion) in self
+                .proposer_slashings_for_inclusion
+                .read()
+                .iter()
+                .enumerate()
+            {
+                if included == for_inclusion {
+                    indices_to_delete.push(i);
+                }
+            }
+        }
+
+        let proposer_slashings_for_inclusion = &mut self.proposer_slashings_for_inclusion.write();
+        for i in indices_to_delete {
+            proposer_slashings_for_inclusion.remove(i);
+        }
+    }
+
+    /// Accept some attester slashing and queue it for inclusion in an appropriate block.
+    pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) {
+        // TODO: attester_slashings are not checked for validity; check them.
+        //
+        // https://github.com/sigp/lighthouse/issues/276
+        self.attester_slashings_for_inclusion
+            .write()
+            .push(attester_slashing);
+    }
+
+    /// Return a vec of attester slashings suitable for inclusion in some block.
+    pub fn get_attester_slashings_for_block(&self) -> Vec<AttesterSlashing> {
+        // TODO: attester_slashings are indiscriminately included; check them for validity.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        self.attester_slashings_for_inclusion.read().clone()
+    }
+
+    /// Takes a list of `AttesterSlashings` that were included in recent blocks and removes them
+    /// from the inclusion queue.
+    ///
+    /// This ensures that `AttesterSlashings` are not included twice in successive blocks.
+    pub fn set_attester_slashings_as_included(
+        &self,
+        included_attester_slashings: &[AttesterSlashing],
+    ) {
+        // TODO: method does not take forks into account; consider this.
+        //
+        // https://github.com/sigp/lighthouse/issues/275
+        let mut indices_to_delete = vec![];
+
+        for included in included_attester_slashings {
+            for (i, for_inclusion) in self
+                .attester_slashings_for_inclusion
+                .read()
+                .iter()
+                .enumerate()
+            {
+                if included == for_inclusion {
+                    indices_to_delete.push(i);
+                }
+            }
+        }
+
+        let attester_slashings_for_inclusion = &mut self.attester_slashings_for_inclusion.write();
+        for i in indices_to_delete {
+            attester_slashings_for_inclusion.remove(i);
+        }
+    }
+
     /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
     ///
     /// This could be a very expensive operation and should only be done in testing/analysis
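All of the `set_*_as_included` methods added above share one job: once a block containing some queued operations has been processed, drop those operations from the corresponding `*_for_inclusion` queue so they are not re-submitted. A compact standalone sketch of that idea follows; note it uses `Vec::retain` as a simplification rather than the index-collection loop in the diff, and the `Exit` type is a hypothetical stand-in.

```rust
use parking_lot::RwLock;

#[derive(Clone, Debug, PartialEq)]
struct Exit {
    validator_index: u64,
}

struct Queue {
    exits_for_inclusion: RwLock<Vec<Exit>>,
}

impl Queue {
    /// Remove any queued exit that has already been included in a block,
    /// so it is not included again in the next block we produce.
    fn set_exits_as_included(&self, included: &[Exit]) {
        self.exits_for_inclusion
            .write()
            .retain(|queued| !included.contains(queued));
    }
}

fn main() {
    let queue = Queue {
        exits_for_inclusion: RwLock::new(vec![
            Exit { validator_index: 1 },
            Exit { validator_index: 2 },
        ]),
    };
    queue.set_exits_as_included(&[Exit { validator_index: 1 }]);
    assert_eq!(queue.exits_for_inclusion.read().len(), 1);
}
```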
@@ -410,6 +625,8 @@ where
             last_slot = slot;
         }
 
+        dump.reverse();
+
         Ok(dump)
     }
 
@@ -459,7 +676,7 @@ where
         // Transition the parent state to the present slot.
        let mut state = parent_state;
         for _ in state.slot.as_u64()..present_slot.as_u64() {
-            if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) {
+            if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) {
                 return Ok(BlockProcessingOutcome::InvalidBlock(
                     InvalidBlock::SlotProcessingError(e),
                 ));
@@ -468,7 +685,7 @@ where
 
         // Apply the received block to its parent state (which has been transitioned into this
         // slot).
-        if let Err(e) = state.per_block_processing(&block, &self.spec) {
+        if let Err(e) = per_block_processing(&mut state, &block, &self.spec) {
             return Ok(BlockProcessingOutcome::InvalidBlock(
                 InvalidBlock::PerBlockProcessingError(e),
             ));
@@ -486,6 +703,13 @@ where
         self.block_store.put(&block_root, &ssz_encode(&block)[..])?;
         self.state_store.put(&state_root, &ssz_encode(&state)[..])?;
 
+        // Update the inclusion queues so they aren't re-submitted.
+        self.set_deposits_as_included(&block.body.deposits[..]);
+        self.set_transfers_as_included(&block.body.transfers[..]);
+        self.set_exits_as_included(&block.body.voluntary_exits[..]);
+        self.set_proposer_slashings_as_included(&block.body.proposer_slashings[..]);
+        self.set_attester_slashings_as_included(&block.body.attester_slashings[..]);
+
         // run the fork_choice add_block logic
         self.fork_choice
             .write()
@@ -496,17 +720,9 @@ where
         // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be
         // run instead.
         if self.head().beacon_block_root == parent_block_root {
-            self.update_canonical_head(
-                block.clone(),
-                block_root.clone(),
-                state.clone(),
-                state_root,
-            );
+            self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
             // Update the local state variable.
-            *self.state.write() = state.clone();
-            // Update the cached state variable.
-            *self.cached_state.write() =
-                CachedBeaconState::from_beacon_state(state.clone(), self.spec.clone())?;
+            *self.state.write() = state;
         }
 
         Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
@@ -516,7 +732,10 @@ where
     ///
     /// The produced block will not be inherently valid, it must be signed by a block producer.
     /// Block signing is out of the scope of this function and should be done by a separate program.
-    pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> {
+    pub fn produce_block(
+        &self,
+        randao_reveal: Signature,
+    ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
         debug!("Producing block at slot {}...", self.state.read().slot);
 
         let mut state = self.state.read().clone();
@@ -533,7 +752,9 @@ where
             attestations.len()
         );
 
-        let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?;
+        let parent_root = *state
+            .get_block_root(state.slot.saturating_sub(1_u64), &self.spec)
+            .ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?;
 
         let mut block = BeaconBlock {
             slot: state.slot,
@@ -547,31 +768,24 @@ where
             },
             signature: self.spec.empty_signature.clone(), // To be completed by a validator.
             body: BeaconBlockBody {
-                proposer_slashings: vec![],
-                attester_slashings: vec![],
+                proposer_slashings: self.get_proposer_slashings_for_block(),
+                attester_slashings: self.get_attester_slashings_for_block(),
                 attestations,
-                deposits: vec![],
-                exits: vec![],
+                deposits: self.get_deposits_for_block(),
+                voluntary_exits: self.get_exits_for_block(),
+                transfers: self.get_transfers_for_block(),
             },
         };
 
         trace!("BeaconChain::produce_block: updating state for new block.",);
 
-        let result =
-            state.per_block_processing_without_verifying_block_signature(&block, &self.spec);
-        trace!(
-            "BeaconNode::produce_block: state processing result: {:?}",
-            result
-        );
-        result.ok()?;
+        per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
 
         let state_root = state.canonical_root();
 
         block.state_root = state_root;
 
-        trace!("Block produced.");
-
-        Some((block, state))
+        Ok((block, state))
     }
 
     // TODO: Left this as is, modify later
@@ -1,150 +0,0 @@
-use log::{debug, trace};
-use std::collections::HashMap;
-use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Epoch, Slot};
-
-pub const CACHE_PREVIOUS: bool = false;
-pub const CACHE_CURRENT: bool = true;
-pub const CACHE_NEXT: bool = false;
-
-pub type CrosslinkCommittees = Vec<(Vec<usize>, u64)>;
-pub type Shard = u64;
-pub type CommitteeIndex = u64;
-pub type AttestationDuty = (Slot, Shard, CommitteeIndex);
-pub type AttestationDutyMap = HashMap<u64, AttestationDuty>;
-
-// TODO: CachedBeaconState is presently duplicating `BeaconState` and `ChainSpec`. This is a
-// massive memory waste, switch them to references.
-
-pub struct CachedBeaconState {
-    pub state: BeaconState,
-    committees: Vec<Vec<CrosslinkCommittees>>,
-    attestation_duties: Vec<AttestationDutyMap>,
-    next_epoch: Epoch,
-    current_epoch: Epoch,
-    previous_epoch: Epoch,
-    spec: ChainSpec,
-}
-
-impl CachedBeaconState {
-    pub fn from_beacon_state(
-        state: BeaconState,
-        spec: ChainSpec,
-    ) -> Result<Self, BeaconStateError> {
-        let current_epoch = state.current_epoch(&spec);
-        let previous_epoch = if current_epoch == spec.genesis_epoch {
-            current_epoch
-        } else {
-            current_epoch.saturating_sub(1_u64)
-        };
-        let next_epoch = state.next_epoch(&spec);
-
-        let mut committees: Vec<Vec<CrosslinkCommittees>> = Vec::with_capacity(3);
-        let mut attestation_duties: Vec<AttestationDutyMap> = Vec::with_capacity(3);
-
-        if CACHE_PREVIOUS {
-            debug!("from_beacon_state: building previous epoch cache.");
-            let cache = build_epoch_cache(&state, previous_epoch, &spec)?;
-            committees.push(cache.committees);
-            attestation_duties.push(cache.attestation_duty_map);
-        } else {
-            committees.push(vec![]);
-            attestation_duties.push(HashMap::new());
-        }
-        if CACHE_CURRENT {
-            debug!("from_beacon_state: building current epoch cache.");
-            let cache = build_epoch_cache(&state, current_epoch, &spec)?;
-            committees.push(cache.committees);
-            attestation_duties.push(cache.attestation_duty_map);
-        } else {
-            committees.push(vec![]);
-            attestation_duties.push(HashMap::new());
-        }
-        if CACHE_NEXT {
-            debug!("from_beacon_state: building next epoch cache.");
-            let cache = build_epoch_cache(&state, next_epoch, &spec)?;
-            committees.push(cache.committees);
-            attestation_duties.push(cache.attestation_duty_map);
-        } else {
-            committees.push(vec![]);
-            attestation_duties.push(HashMap::new());
-        }
-
-        Ok(Self {
-            state,
-            committees,
-            attestation_duties,
-            next_epoch,
-            current_epoch,
-            previous_epoch,
-            spec,
-        })
-    }
-
-    fn slot_to_cache_index(&self, slot: Slot) -> Option<usize> {
-        trace!("slot_to_cache_index: cache lookup");
-        match slot.epoch(self.spec.epoch_length) {
-            epoch if (epoch == self.previous_epoch) & CACHE_PREVIOUS => Some(0),
-            epoch if (epoch == self.current_epoch) & CACHE_CURRENT => Some(1),
-            epoch if (epoch == self.next_epoch) & CACHE_NEXT => Some(2),
-            _ => None,
-        }
-    }
-
-    /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an
-    /// attestation.
-    ///
-    /// Cached method.
-    ///
-    /// Spec v0.2.0
-    pub fn attestation_slot_and_shard_for_validator(
-        &self,
-        validator_index: usize,
-        _spec: &ChainSpec,
-    ) -> Result<Option<(Slot, u64, u64)>, BeaconStateError> {
-        // Get the result for this epoch.
-        let cache_index = self
-            .slot_to_cache_index(self.state.slot)
-            .expect("Current epoch should always have a cache index.");
-
-        let duties = self.attestation_duties[cache_index]
-            .get(&(validator_index as u64))
-            .and_then(|tuple| Some(*tuple));
-
-        Ok(duties)
-    }
-}
-
-struct EpochCacheResult {
-    committees: Vec<CrosslinkCommittees>,
-    attestation_duty_map: AttestationDutyMap,
-}
-
-fn build_epoch_cache(
-    state: &BeaconState,
-    epoch: Epoch,
-    spec: &ChainSpec,
-) -> Result<EpochCacheResult, BeaconStateError> {
-    let mut epoch_committees: Vec<CrosslinkCommittees> =
-        Vec::with_capacity(spec.epoch_length as usize);
-    let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
-
-    for slot in epoch.slot_iter(spec.epoch_length) {
-        let slot_committees = state.get_crosslink_committees_at_slot(slot, false, spec)?;
-
-        for (committee, shard) in slot_committees {
-            for (committee_index, validator_index) in committee.iter().enumerate() {
-                attestation_duty_map.insert(
-                    *validator_index as u64,
-                    (slot, shard, committee_index as u64),
-                );
-            }
-        }
-
-        epoch_committees.push(state.get_crosslink_committees_at_slot(slot, false, spec)?)
-    }
-
-    Ok(EpochCacheResult {
-        committees: epoch_committees,
-        attestation_duty_map,
-    })
-}
@@ -3,7 +3,7 @@ use types::{BeaconBlock, BeaconState, Hash256};
 
 /// Represents some block and it's associated state. Generally, this will be used for tracking the
 /// head, justified head and finalized head.
-#[derive(PartialEq, Clone, Serialize)]
+#[derive(Clone, Serialize)]
 pub struct CheckPoint {
     pub beacon_block: BeaconBlock,
     pub beacon_block_root: Hash256,
beacon_node/beacon_chain/src/errors.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
+use fork_choice::ForkChoiceError;
+use state_processing::BlockProcessingError;
+use types::*;
+
+macro_rules! easy_from_to {
+    ($from: ident, $to: ident) => {
+        impl From<$from> for $to {
+            fn from(e: $from) -> $to {
+                $to::$from(e)
+            }
+        }
+    };
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BeaconChainError {
+    InsufficientValidators,
+    BadRecentBlockRoots,
+    BeaconStateError(BeaconStateError),
+    DBInconsistent(String),
+    DBError(String),
+    ForkChoiceError(ForkChoiceError),
+    MissingBeaconBlock(Hash256),
+    MissingBeaconState(Hash256),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BlockProductionError {
+    UnableToGetBlockRootFromState,
+    BlockProcessingError(BlockProcessingError),
+}
+
+easy_from_to!(BlockProcessingError, BlockProductionError);
@@ -1,8 +1,9 @@
 mod attestation_aggregator;
 mod beacon_chain;
-mod cached_beacon_state;
 mod checkpoint;
+mod errors;
 
-pub use self::beacon_chain::{BeaconChain, Error};
+pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock};
 pub use self::checkpoint::CheckPoint;
+pub use self::errors::BeaconChainError;
 pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError};
@@ -4,12 +4,21 @@ version = "0.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"
 
+[[bin]]
+name = "test_harness"
+path = "src/bin.rs"
+
+[lib]
+name = "test_harness"
+path = "src/lib.rs"
+
 [[bench]]
 name = "state_transition"
 harness = false
 
 [dev-dependencies]
 criterion = "0.2"
+state_processing = { path = "../../../eth2/state_processing" }
 
 [dependencies]
 attester = { path = "../../../eth2/attester" }
@@ -17,6 +26,7 @@ beacon_chain = { path = "../../beacon_chain" }
 block_proposer = { path = "../../../eth2/block_proposer" }
 bls = { path = "../../../eth2/utils/bls" }
 boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
+clap = "2.32.0"
 db = { path = "../../db" }
 parking_lot = "0.7"
 failure = "0.1"
@@ -32,3 +42,4 @@ serde_json = "1.0"
 slot_clock = { path = "../../../eth2/utils/slot_clock" }
 ssz = { path = "../../../eth2/utils/ssz" }
 types = { path = "../../../eth2/types" }
+yaml-rust = "0.4.2"
beacon_node/beacon_chain/test_harness/README.md (new file, 150 lines)
@@ -0,0 +1,150 @@
+# Test Harness
+
+Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
+
+This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
+directly to the `BeaconChain` via an `Arc`.
+
+The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
+instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
+producing blocks and attestations.
+
+The crate consists of a library and binary, examples for using both are
+described below.
+
+## YAML
+
+Both the library and the binary are capable of parsing tests from a YAML file,
+in fact this is the sole purpose of the binary.
+
+You can find YAML test cases [here](specs/). An example is included below:
+
+```yaml
+title: Validator Registry Tests
+summary: Tests deposit and slashing effects on validator registry.
+test_suite: validator_registry
+fork: tchaikovsky
+version: 1.0
+test_cases:
+  - config:
+      slots_per_epoch: 64
+      deposits_for_chain_start: 1000
+      num_slots: 64
+      skip_slots: [2, 3]
+      deposits:
+        # At slot 1, create a new validator deposit of 32 ETH.
+        - slot: 1
+          amount: 32
+        # Trigger more deposits...
+        - slot: 3
+          amount: 32
+        - slot: 5
+          amount: 32
+      proposer_slashings:
+        # At slot 2, trigger a proposer slashing for validator #42.
+        - slot: 2
+          validator_index: 42
+        # Trigger another slashing...
+        - slot: 8
+          validator_index: 13
+      attester_slashings:
+        # At slot 2, trigger an attester slashing for validators #11 and #12.
+        - slot: 2
+          validator_indices: [11, 12]
+        # Trigger another slashing...
+        - slot: 5
+          validator_indices: [14]
+    results:
+      num_skipped_slots: 2
+      states:
+        - slot: 63
+          num_validators: 1003
+          slashed_validators: [11, 12, 13, 14, 42]
+          exited_validators: []
+
+```
+
+Thanks to [prsym](http://github.com/prysmaticlabs/prysm) for coming up with the
+base YAML format.
+
+### Notes
+
+Wherever `slot` is used, it is actually the "slot height", or slots since
+genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
+
+### Differences from Prysmatic's format
+
+1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
+   ommitted from the test specification. It assumed they should be valid
+   objects.
+2. There is a `states` list in `results` that runs checks against any state
+   specified by a `slot` number. This is in contrast to the variables in
+   `results` that assume the last (highest) state should be inspected.
+
+#### Reasoning
+
+Respective reasonings for above changes:
+
+1. This removes the concerns of the actual object structure from the tests.
+   This allows for more variation in the deposits/slashings objects without
+   needing to update the tests. Also, it makes it makes it easier to create
+   tests.
+2. This gives more fine-grained control over the tests. It allows for checking
+   that certain events happened at certain times whilst making the tests only
+   slightly more verbose.
+
+_Notes: it may be useful to add an extra field to each slashing type to
+indicate if it should be valid or not. It also may be useful to add an option
+for double-vote/surround-vote attester slashings. The `amount` field was left
+on `deposits` as it changes the behaviour of state significantly._
+
+## Binary Usage Example
+
+Follow these steps to run as a binary:
+
+1. Navigate to the root of this crate (where this readme is located)
+2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml`
+
+_Note: the `--release` flag builds the binary without all the debugging
+instrumentation. The test is much faster built using `--release`. As is
+customary in cargo, the flags before `--` are passed to cargo and the flags
+after are passed to the binary._
+
+### CLI Options
+
+```
+Lighthouse Test Harness Runner 0.0.1
+Sigma Prime <contact@sigmaprime.io>
+Runs `test_harness` using a YAML test_case.
+
+USAGE:
+    test_harness --log-level <LOG_LEVEL> --yaml <FILE>
+
+FLAGS:
+    -h, --help       Prints help information
+    -V, --version    Prints version information
+
+OPTIONS:
+        --log-level <LOG_LEVEL>    Logging level. [default: debug] [possible values: error, warn, info, debug, trace]
+        --yaml <FILE>              YAML file test_case.
+```
+
+
+## Library Usage Example
+
+```rust
+use test_harness::BeaconChainHarness;
+use types::ChainSpec;
+
+let validator_count = 8;
+let spec = ChainSpec::few_validators();
+
+let mut harness = BeaconChainHarness::new(spec, validator_count);
+
+harness.advance_chain_with_block();
+
+let chain = harness.chain_dump().unwrap();
+
+// One block should have been built on top of the genesis block.
+assert_eq!(chain.len(), 2);
+```
@@ -1,6 +1,7 @@
 use criterion::Criterion;
 use criterion::{black_box, criterion_group, criterion_main, Benchmark};
 // use env_logger::{Builder, Env};
+use state_processing::SlotProcessable;
 use test_harness::BeaconChainHarness;
 use types::{ChainSpec, Hash256};
 
@@ -10,7 +11,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
     let validator_count = 1000;
     let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
 
-    let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2);
+    let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2);
 
     for _ in 0..epoch_depth {
         rig.advance_chain_with_block();
@@ -18,7 +19,7 @@ fn mid_epoch_state_transition(c: &mut Criterion) {
 
     let state = rig.beacon_chain.state.read().clone();
 
-    assert!((state.slot + 1) % rig.spec.epoch_length != 0);
+    assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0);
 
     c.bench_function("mid-epoch state transition 10k validators", move |b| {
         let state = state.clone();
@@ -35,7 +36,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
     let validator_count = 10000;
     let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
 
-    let epoch_depth = rig.spec.epoch_length * 2;
+    let epoch_depth = rig.spec.slots_per_epoch * 2;
 
     for _ in 0..(epoch_depth - 1) {
         rig.advance_chain_with_block();
@ -43,7 +44,7 @@ fn epoch_boundary_state_transition(c: &mut Criterion) {
|
|||||||
|
|
||||||
let state = rig.beacon_chain.state.read().clone();
|
let state = rig.beacon_chain.state.read().clone();
|
||||||
|
|
||||||
assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0);
|
assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0);
|
||||||
|
|
||||||
c.bench(
|
c.bench(
|
||||||
"routines",
|
"routines",
|
||||||
|
@ -0,0 +1,59 @@
title: Validator Registry Tests
summary: Tests deposit and slashing effects on validator registry.
test_suite: validator_registry
fork: tchaikovsky
version: 1.0
test_cases:
  - config:
      slots_per_epoch: 64
      deposits_for_chain_start: 1000
      num_slots: 64
      skip_slots: [2, 3]
      deposits:
        # At slot 1, create a new validator deposit of 5 ETH.
        - slot: 1
          amount: 5000000000
        # Trigger more deposits...
        - slot: 3
          amount: 5000000000
        - slot: 5
          amount: 32000000000
      exits:
        # At slot 10, submit an exit for validator #50.
        - slot: 10
          validator_index: 50
      transfers:
        - slot: 6
          from: 1000
          to: 1001
          amount: 5000000000
      proposer_slashings:
        # At slot 2, trigger a proposer slashing for validator #42.
        - slot: 2
          validator_index: 42
        # Trigger another slashing...
        - slot: 8
          validator_index: 13
      attester_slashings:
        # At slot 2, trigger an attester slashing for validators #11 and #12.
        - slot: 2
          validator_indices: [11, 12]
        # Trigger another slashing...
        - slot: 5
          validator_indices: [14]
    results:
      num_skipped_slots: 2
      states:
        - slot: 63
          num_validators: 1003
          slashed_validators: [11, 12, 13, 14, 42]
          exited_validators: []
          exit_initiated_validators: [50]
          balances:
            - validator_index: 1000
              comparison: "eq"
              balance: 0
            - validator_index: 1001
              comparison: "eq"
              balance: 10000000000
@ -1,7 +1,7 @@
 use super::ValidatorHarness;
-use beacon_chain::BeaconChain;
+use beacon_chain::{BeaconChain, BlockProcessingOutcome};
-pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
+pub use beacon_chain::{BeaconChainError, CheckPoint};
-use bls::create_proof_of_possession;
+use bls::{create_proof_of_possession, get_withdrawal_credentials};
 use db::{
     stores::{BeaconBlockStore, BeaconStateStore},
     MemoryDB,
@ -11,14 +11,9 @@ use log::debug;
 use rayon::prelude::*;
 use slot_clock::TestingSlotClock;
 use std::collections::HashSet;
-use std::fs::File;
-use std::io::prelude::*;
 use std::iter::FromIterator;
 use std::sync::Arc;
-use types::{
-    BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, FreeAttestation, Hash256,
-    Keypair, Slot,
-};
+use types::*;

 /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
 /// to it. Each validator is provided a borrow to the beacon chain, where it may read
@ -72,7 +67,13 @@ impl BeaconChainHarness {
                 timestamp: genesis_time - 1,
                 deposit_input: DepositInput {
                     pubkey: keypair.pk.clone(),
-                    withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
+                    // Validator can withdraw using their main keypair.
+                    withdrawal_credentials: Hash256::from_slice(
+                        &get_withdrawal_credentials(
+                            &keypair.pk,
+                            spec.bls_withdrawal_prefix_byte,
+                        )[..],
+                    ),
                     proof_of_possession: create_proof_of_possession(&keypair),
                 },
             },
@ -130,13 +131,13 @@ impl BeaconChainHarness {

         let nth_slot = slot
             - slot
-                .epoch(self.spec.epoch_length)
-                .start_slot(self.spec.epoch_length);
-        let nth_epoch = slot.epoch(self.spec.epoch_length) - self.spec.genesis_epoch;
+                .epoch(self.spec.slots_per_epoch)
+                .start_slot(self.spec.slots_per_epoch);
+        let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
         debug!(
             "Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
             slot,
-            slot.epoch(self.spec.epoch_length),
+            slot.epoch(self.spec.slots_per_epoch),
             nth_epoch,
             nth_slot
         );
@ -157,7 +158,7 @@ impl BeaconChainHarness {
             .beacon_chain
             .state
             .read()
-            .get_crosslink_committees_at_slot(present_slot, false, &self.spec)
+            .get_crosslink_committees_at_slot(present_slot, &self.spec)
             .unwrap()
             .iter()
             .fold(vec![], |mut acc, (committee, _slot)| {
@ -223,7 +224,10 @@ impl BeaconChainHarness {
         debug!("Producing block...");
         let block = self.produce_block();
         debug!("Submitting block for processing...");
-        self.beacon_chain.process_block(block).unwrap();
+        match self.beacon_chain.process_block(block) {
+            Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
+            other => panic!("block processing failed with {:?}", other),
+        };
         debug!("...block processed by BeaconChain.");

         debug!("Producing free attestations...");
@ -242,16 +246,76 @@ impl BeaconChainHarness {
         debug!("Free attestations processed.");
     }

+    /// Signs a message using some validator's secret key with the `Fork` info from the latest state
+    /// of the `BeaconChain`.
+    ///
+    /// Useful for producing slashable messages and other objects that `BeaconChainHarness` does
+    /// not produce naturally.
+    pub fn validator_sign(
+        &self,
+        validator_index: usize,
+        message: &[u8],
+        epoch: Epoch,
+        domain_type: Domain,
+    ) -> Option<Signature> {
+        let validator = self.validators.get(validator_index)?;
+
+        let domain = self
+            .spec
+            .get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
+
+        Some(Signature::new(message, domain, &validator.keypair.sk))
+    }
+
+    /// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
+    /// `ValidatorHarness` instance for this validator.
+    ///
+    /// If a new `ValidatorHarness` was created, the validator should become fully operational as
+    /// if the validator were created during `BeaconChainHarness` instantiation.
+    pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
+        self.beacon_chain.receive_deposit_for_inclusion(deposit);
+
+        // If a keypair is present, add a new `ValidatorHarness` to the rig.
+        if let Some(keypair) = keypair {
+            let validator =
+                ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone());
+            self.validators.push(validator);
+        }
+    }
+
+    /// Submit an exit to the `BeaconChain` for inclusion in some block.
+    ///
+    /// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
+    /// will stop receiving duties from the beacon chain and just do nothing when prompted to
+    /// produce/attest.
+    pub fn add_exit(&mut self, exit: VoluntaryExit) {
+        self.beacon_chain.receive_exit_for_inclusion(exit);
+    }
+
+    /// Submit a transfer to the `BeaconChain` for inclusion in some block.
+    pub fn add_transfer(&mut self, transfer: Transfer) {
+        self.beacon_chain.receive_transfer_for_inclusion(transfer);
+    }
+
+    /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
+    pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
+        self.beacon_chain
+            .receive_proposer_slashing_for_inclusion(proposer_slashing);
+    }
+
+    /// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
+    pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
+        self.beacon_chain
+            .receive_attester_slashing_for_inclusion(attester_slashing);
+    }
+
+    /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
+    pub fn run_fork_choice(&mut self) {
+        self.beacon_chain.fork_choice().unwrap()
+    }
+
     /// Dump all blocks and states from the canonical beacon chain.
     pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
         self.beacon_chain.chain_dump()
     }
-
-    /// Write the output of `chain_dump` to a JSON file.
-    pub fn dump_to_file(&self, filename: String, chain_dump: &[CheckPoint]) {
-        let json = serde_json::to_string(chain_dump).unwrap();
-        let mut file = File::create(filename).unwrap();
-        file.write_all(json.as_bytes())
-            .expect("Failed writing dump to file.");
-    }
 }
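For orientation (not part of this diff), a minimal sketch of how the new mutation methods on `BeaconChainHarness` might be driven from a test. The exit below is left unsigned for brevity; a real test would sign it via `validator_sign`, as `build_exit` in `test_case.rs` (further down) does.

```rust
use test_harness::BeaconChainHarness;
use types::{ChainSpec, Signature, VoluntaryExit};

// Build a small harness, as in the README example above.
let mut harness = BeaconChainHarness::new(ChainSpec::few_validators(), 8);

// Queue an exit for validator 0 and build a block that may include it.
let exit = VoluntaryExit {
    epoch: harness.beacon_chain.state.read().current_epoch(&harness.spec),
    validator_index: 0,
    signature: Signature::empty_signature(), // unsigned sketch only
};
harness.add_exit(exit);
harness.advance_chain_with_block();

// Select the canonical head before inspecting the chain.
harness.run_fork_choice();
let dump = harness.chain_dump().unwrap();
assert_eq!(dump.len(), 2); // genesis + one block
```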
beacon_node/beacon_chain/test_harness/src/bin.rs (new file)
@ -0,0 +1,69 @@
use clap::{App, Arg};
use env_logger::{Builder, Env};
use std::{fs::File, io::prelude::*};
use test_case::TestCase;
use yaml_rust::YamlLoader;

mod beacon_chain_harness;
mod test_case;
mod validator_harness;

use validator_harness::ValidatorHarness;

fn main() {
    let matches = App::new("Lighthouse Test Harness Runner")
        .version("0.0.1")
        .author("Sigma Prime <contact@sigmaprime.io>")
        .about("Runs `test_harness` using a YAML test_case.")
        .arg(
            Arg::with_name("yaml")
                .long("yaml")
                .value_name("FILE")
                .help("YAML file test_case.")
                .required(true),
        )
        .arg(
            Arg::with_name("log")
                .long("log-level")
                .value_name("LOG_LEVEL")
                .help("Logging level.")
                .possible_values(&["error", "warn", "info", "debug", "trace"])
                .default_value("debug")
                .required(true),
        )
        .get_matches();

    if let Some(log_level) = matches.value_of("log") {
        Builder::from_env(Env::default().default_filter_or(log_level)).init();
    }

    if let Some(yaml_file) = matches.value_of("yaml") {
        let docs = {
            let mut file = File::open(yaml_file).unwrap();

            let mut yaml_str = String::new();
            file.read_to_string(&mut yaml_str).unwrap();

            YamlLoader::load_from_str(&yaml_str).unwrap()
        };

        for doc in &docs {
            // For each `test_cases` YAML in the document, build a `TestCase`, execute it and
            // assert that the execution result matches the test_case description.
            //
            // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
            // and a new `BeaconChain` is built as per the test_case.
            //
            // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
            // and states in the chain is obtained and checked against the `results` specified in
            // the `test_case`.
            //
            // If any of the expectations in the results are not met, the process
            // panics with a message.
            for test_case in doc["test_cases"].as_vec().unwrap() {
                let test_case = TestCase::from_yaml(test_case);
                test_case.assert_result_valid(test_case.execute())
            }
        }
    }
}
@ -1,4 +1,32 @@
+//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
+//!
+//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
+//! directly to the `BeaconChain` via an `Arc`.
+//!
+//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
+//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
+//! producing blocks and attestations.
+//!
+//! Example:
+//! ```
+//! use test_harness::BeaconChainHarness;
+//! use types::ChainSpec;
+//!
+//! let validator_count = 8;
+//! let spec = ChainSpec::few_validators();
+//!
+//! let mut harness = BeaconChainHarness::new(spec, validator_count);
+//!
+//! harness.advance_chain_with_block();
+//!
+//! let chain = harness.chain_dump().unwrap();
+//!
+//! // One block should have been built on top of the genesis block.
+//! assert_eq!(chain.len(), 2);
+//! ```
+
 mod beacon_chain_harness;
+pub mod test_case;
 mod validator_harness;

 pub use self::beacon_chain_harness::BeaconChainHarness;
beacon_node/beacon_chain/test_harness/src/test_case.rs (new file)
@ -0,0 +1,335 @@
//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from
//! a YAML file.

use crate::beacon_chain_harness::BeaconChainHarness;
use beacon_chain::CheckPoint;
use bls::{create_proof_of_possession, get_withdrawal_credentials};
use log::{info, warn};
use ssz::SignedRoot;
use types::*;

use types::{
    attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder,
};
use yaml_rust::Yaml;

mod config;
mod results;
mod state_check;
mod yaml_helpers;

pub use config::Config;
pub use results::Results;
pub use state_check::StateCheck;

/// Defines the execution and testing of a `BeaconChainHarness` instantiation.
///
/// Typical workflow is:
///
/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);`
/// 2. Execute the test_case: `let result = test_case.execute();`
/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);`
#[derive(Debug)]
pub struct TestCase {
    /// Defines the execution.
    pub config: Config,
    /// Defines tests to run against the execution result.
    pub results: Results,
}

/// The result of executing a `TestCase`.
pub struct ExecutionResult {
    /// The canonical beacon chain generated from the execution.
    pub chain: Vec<CheckPoint>,
    /// The spec used for execution.
    pub spec: ChainSpec,
}

impl TestCase {
    /// Load the test case from a YAML document.
    pub fn from_yaml(test_case: &Yaml) -> Self {
        Self {
            results: Results::from_yaml(&test_case["results"]),
            config: Config::from_yaml(&test_case["config"]),
        }
    }

    /// Return a `ChainSpec::foundation()`.
    ///
    /// If specified in `config`, returns it with a modified `slots_per_epoch`.
    fn spec(&self) -> ChainSpec {
        let mut spec = ChainSpec::foundation();

        if let Some(n) = self.config.slots_per_epoch {
            spec.slots_per_epoch = n;
        }

        spec
    }

    /// Executes the test case, returning an `ExecutionResult`.
    #[allow(clippy::cyclomatic_complexity)]
    pub fn execute(&self) -> ExecutionResult {
        let spec = self.spec();
        let validator_count = self.config.deposits_for_chain_start;
        let slots = self.config.num_slots;

        info!(
            "Building BeaconChainHarness with {} validators...",
            validator_count
        );

        let mut harness = BeaconChainHarness::new(spec, validator_count);

        info!("Starting simulation across {} slots...", slots);

        // Start at 1 because genesis counts as a slot.
        for slot_height in 1..slots {
            // Used to ensure that deposits in the same slot have incremental deposit indices.
            let mut deposit_index_offset = 0;

            // Feed deposits to the BeaconChain.
            if let Some(ref deposits) = self.config.deposits {
                for (slot, amount) in deposits {
                    if *slot == slot_height {
                        info!("Including deposit at slot height {}.", slot_height);
                        let (deposit, keypair) =
                            build_deposit(&harness, *amount, deposit_index_offset);
                        harness.add_deposit(deposit, Some(keypair.clone()));
                        deposit_index_offset += 1;
                    }
                }
            }

            // Feed proposer slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.proposer_slashings {
                for (slot, validator_index) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including proposer slashing at slot height {} for validator #{}.",
                            slot_height, validator_index
                        );
                        let slashing = build_proposer_slashing(&harness, *validator_index);
                        harness.add_proposer_slashing(slashing);
                    }
                }
            }

            // Feed attester slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.attester_slashings {
                for (slot, validator_indices) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including attester slashing at slot height {} for validators {:?}.",
                            slot_height, validator_indices
                        );
                        let slashing =
                            build_double_vote_attester_slashing(&harness, &validator_indices[..]);
                        harness.add_attester_slashing(slashing);
                    }
                }
            }

            // Feed exits to the BeaconChain.
            if let Some(ref exits) = self.config.exits {
                for (slot, validator_index) in exits {
                    if *slot == slot_height {
                        info!(
                            "Including exit at slot height {} for validator {}.",
                            slot_height, validator_index
                        );
                        let exit = build_exit(&harness, *validator_index);
                        harness.add_exit(exit);
                    }
                }
            }

            // Feed transfers to the BeaconChain.
            if let Some(ref transfers) = self.config.transfers {
                for (slot, from, to, amount) in transfers {
                    if *slot == slot_height {
                        info!(
                            "Including transfer at slot height {} from validator {}.",
                            slot_height, from
                        );
                        let transfer = build_transfer(&harness, *from, *to, *amount);
                        harness.add_transfer(transfer);
                    }
                }
            }

            // Build a block or skip a slot.
            match self.config.skip_slots {
                Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
                    warn!("Skipping slot at height {}.", slot_height);
                    harness.increment_beacon_chain_slot();
                }
                _ => {
                    info!("Producing block at slot height {}.", slot_height);
                    harness.advance_chain_with_block();
                }
            }
        }

        harness.run_fork_choice();

        info!("Test execution complete!");

        info!("Building chain dump for analysis...");

        ExecutionResult {
            chain: harness.chain_dump().expect("Chain dump failed."),
            spec: (*harness.spec).clone(),
        }
    }

    /// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`.
    ///
    /// # Panics
    ///
    /// Panics with a message if any result does not match expectations.
    pub fn assert_result_valid(&self, execution_result: ExecutionResult) {
        info!("Verifying test results...");
        let spec = &execution_result.spec;

        if let Some(num_skipped_slots) = self.results.num_skipped_slots {
            assert_eq!(
                execution_result.chain.len(),
                self.config.num_slots as usize - num_skipped_slots,
                "actual skipped slots != expected."
            );
            info!(
                "OK: Chain length is {} ({} skipped slots).",
                execution_result.chain.len(),
                num_skipped_slots
            );
        }

        if let Some(ref state_checks) = self.results.state_checks {
            for checkpoint in &execution_result.chain {
                let state = &checkpoint.beacon_state;

                for state_check in state_checks {
                    let adjusted_state_slot =
                        state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);

                    if state_check.slot == adjusted_state_slot {
                        state_check.assert_valid(state, spec);
                    }
                }
            }
        }
    }
}

/// Builds a `Transfer` that is valid for the given `BeaconChainHarness` at its next slot.
fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer {
    let slot = harness.beacon_chain.state.read().slot + 1;

    let mut transfer = Transfer {
        from,
        to,
        amount,
        fee: 0,
        slot,
        pubkey: harness.validators[from as usize].keypair.pk.clone(),
        signature: Signature::empty_signature(),
    };

    let message = transfer.signed_root();
    let epoch = slot.epoch(harness.spec.slots_per_epoch);

    transfer.signature = harness
        .validator_sign(from as usize, &message[..], epoch, Domain::Transfer)
        .expect("Unable to sign Transfer");

    transfer
}

/// Builds a `Deposit` that is valid for the given `BeaconChainHarness`.
///
/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
/// deposits.
fn build_deposit(
    harness: &BeaconChainHarness,
    amount: u64,
    index_offset: u64,
) -> (Deposit, Keypair) {
    let keypair = Keypair::random();
    let proof_of_possession = create_proof_of_possession(&keypair);
    let index = harness.beacon_chain.state.read().deposit_index + index_offset;
    let withdrawal_credentials = Hash256::from_slice(
        &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..],
    );

    let deposit = Deposit {
        // Note: `branch` and `index` will need to be updated once the spec defines their
        // validity.
        branch: vec![],
        index,
        deposit_data: DepositData {
            amount,
            timestamp: 1,
            deposit_input: DepositInput {
                pubkey: keypair.pk.clone(),
                withdrawal_credentials,
                proof_of_possession,
            },
        },
    };

    (deposit, keypair)
}

/// Builds a `VoluntaryExit` that is valid for the given `BeaconChainHarness`.
fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
    let epoch = harness
        .beacon_chain
        .state
        .read()
        .current_epoch(&harness.spec);

    let mut exit = VoluntaryExit {
        epoch,
        validator_index,
        signature: Signature::empty_signature(),
    };

    let message = exit.signed_root();

    exit.signature = harness
        .validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
        .expect("Unable to sign VoluntaryExit");

    exit
}

/// Builds an `AttesterSlashing` for some `validator_indices`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_double_vote_attester_slashing(
    harness: &BeaconChainHarness,
    validator_indices: &[u64],
) -> AttesterSlashing {
    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
        harness
            .validator_sign(validator_index as usize, message, epoch, domain)
            .expect("Unable to sign AttesterSlashing")
    };

    AttesterSlashingBuilder::double_vote(validator_indices, signer)
}

/// Builds a `ProposerSlashing` for some `validator_index`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
        harness
            .validator_sign(validator_index as usize, message, epoch, domain)
            .expect("Unable to sign AttesterSlashing")
    };

    ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
}
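As an aside (not part of this diff), the documented three-step `TestCase` workflow could be driven from library code roughly as below, mirroring the loop in `bin.rs` above. The inline YAML and its parameter values are illustrative only, not a known-working configuration.

```rust
use test_harness::test_case::TestCase;
use yaml_rust::YamlLoader;

// Hypothetical, minimal test_cases document shaped like the validator_registry
// YAML added in this commit.
let yaml = r#"
test_cases:
  - config:
      deposits_for_chain_start: 8
      num_slots: 4
    results:
      num_skipped_slots: 0
"#;

let docs = YamlLoader::load_from_str(yaml).unwrap();
for test_case in docs[0]["test_cases"].as_vec().unwrap() {
    let test_case = TestCase::from_yaml(test_case); // 1. instantiate
    let result = test_case.execute();               // 2. execute
    test_case.assert_result_valid(result);          // 3. check results
}
```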
beacon_node/beacon_chain/test_harness/src/test_case/config.rs (new file)
@ -0,0 +1,132 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use types::*;
use yaml_rust::Yaml;

pub type ValidatorIndex = u64;
pub type ValidatorIndices = Vec<u64>;
pub type GweiAmount = u64;

pub type DepositTuple = (SlotHeight, GweiAmount);
pub type ExitTuple = (SlotHeight, ValidatorIndex);
pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
/// (slot_height, from, to, amount)
pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);

/// Defines the execution of a `BeaconStateHarness` across a series of slots.
#[derive(Debug)]
pub struct Config {
    /// Initial validators.
    pub deposits_for_chain_start: usize,
    /// Number of slots in an epoch.
    pub slots_per_epoch: Option<u64>,
    /// Number of slots to build before ending execution.
    pub num_slots: u64,
    /// Number of slots that should be skipped due to inactive validator.
    pub skip_slots: Option<Vec<u64>>,
    /// Deposits to be included during execution.
    pub deposits: Option<Vec<DepositTuple>>,
    /// Proposer slashings to be included during execution.
    pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
    /// Attester slashings to be included during execution.
    pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
    /// Exits to be included during execution.
    pub exits: Option<Vec<ExitTuple>>,
    /// Transfers to be included during execution.
    pub transfers: Option<Vec<TransferTuple>>,
}

impl Config {
    /// Load from a YAML document.
    ///
    /// Expects to receive the `config` section of the document.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        Self {
            deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
                .expect("Must specify validator count"),
            slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
            num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
            skip_slots: as_vec_u64(yaml, "skip_slots"),
            deposits: parse_deposits(&yaml),
            proposer_slashings: parse_proposer_slashings(&yaml),
            attester_slashings: parse_attester_slashings(&yaml),
            exits: parse_exits(&yaml),
            transfers: parse_transfers(&yaml),
        }
    }
}

/// Parse the `transfers` section of the YAML document.
fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
    let mut tuples = vec![];

    for exit in yaml["transfers"].as_vec()? {
        let slot = as_u64(exit, "slot").expect("Incomplete transfer (slot)");
        let from = as_u64(exit, "from").expect("Incomplete transfer (from)");
        let to = as_u64(exit, "to").expect("Incomplete transfer (to)");
        let amount = as_u64(exit, "amount").expect("Incomplete transfer (amount)");

        tuples.push((SlotHeight::from(slot), from, to, amount));
    }

    Some(tuples)
}

/// Parse the `exits` section of the YAML document.
fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
    let mut tuples = vec![];

    for exit in yaml["exits"].as_vec()? {
        let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
        let validator_index =
            as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");

        tuples.push((SlotHeight::from(slot), validator_index));
    }

    Some(tuples)
}

/// Parse the `attester_slashings` section of the YAML document.
fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
    let mut slashings = vec![];

    for slashing in yaml["attester_slashings"].as_vec()? {
        let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
        let validator_indices = as_vec_u64(slashing, "validator_indices")
            .expect("Incomplete attester_slashing (validator_indices)");

        slashings.push((SlotHeight::from(slot), validator_indices));
    }

    Some(slashings)
}

/// Parse the `proposer_slashings` section of the YAML document.
fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
    let mut slashings = vec![];

    for slashing in yaml["proposer_slashings"].as_vec()? {
        let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)");
        let validator_index = as_u64(slashing, "validator_index")
            .expect("Incomplete proposer slashing (validator_index)");

        slashings.push((SlotHeight::from(slot), validator_index));
    }

    Some(slashings)
}

/// Parse the `deposits` section of the YAML document.
fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
    let mut deposits = vec![];

    for deposit in yaml["deposits"].as_vec()? {
        let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
        let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");

        deposits.push((SlotHeight::from(slot), amount))
    }

    Some(deposits)
}
@ -0,0 +1,34 @@
use super::state_check::StateCheck;
use super::yaml_helpers::as_usize;
use yaml_rust::Yaml;

/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
/// `TestCase`.
#[derive(Debug)]
pub struct Results {
    pub num_skipped_slots: Option<usize>,
    pub state_checks: Option<Vec<StateCheck>>,
}

impl Results {
    /// Load from a YAML document.
    ///
    /// Expects the `results` section of the YAML document.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        Self {
            num_skipped_slots: as_usize(yaml, "num_skipped_slots"),
            state_checks: parse_state_checks(yaml),
        }
    }
}

/// Parse the `state_checks` section of the YAML document.
fn parse_state_checks(yaml: &Yaml) -> Option<Vec<StateCheck>> {
    let mut states = vec![];

    for state_yaml in yaml["states"].as_vec()? {
        states.push(StateCheck::from_yaml(state_yaml));
    }

    Some(states)
}
@ -0,0 +1,178 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use log::info;
use types::*;
use yaml_rust::Yaml;

type ValidatorIndex = u64;
type BalanceGwei = u64;

type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);

/// Tests to be conducted upon a `BeaconState` object generated during the execution of a
/// `TestCase`.
#[derive(Debug)]
pub struct StateCheck {
    /// Checked against `beacon_state.slot`.
    pub slot: Slot,
    /// Checked against `beacon_state.validator_registry.len()`.
    pub num_validators: Option<usize>,
    /// A list of validator indices which have been penalized. Must be in ascending order.
    pub slashed_validators: Option<Vec<u64>>,
    /// A list of validator indices which have been fully exited. Must be in ascending order.
    pub exited_validators: Option<Vec<u64>>,
    /// A list of validator indices which have had an exit initiated. Must be in ascending order.
    pub exit_initiated_validators: Option<Vec<u64>>,
    /// A list of balances to check.
    pub balances: Option<Vec<BalanceCheckTuple>>,
}

impl StateCheck {
    /// Load from a YAML document.
    ///
    /// Expects the `state_check` section of the YAML document.
    pub fn from_yaml(yaml: &Yaml) -> Self {
        Self {
            slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
            num_validators: as_usize(&yaml, "num_validators"),
            slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
            exited_validators: as_vec_u64(&yaml, "exited_validators"),
            exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
            balances: parse_balances(&yaml),
        }
    }

    /// Performs all checks against a `BeaconState`.
    ///
    /// # Panics
    ///
    /// Panics with an error message if any test fails.
    pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
        let state_epoch = state.slot.epoch(spec.slots_per_epoch);

        info!("Running state check for slot height {}.", self.slot);

        // Check the state slot.
        assert_eq!(
            self.slot,
            state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
            "State slot is invalid."
        );

        if let Some(num_validators) = self.num_validators {
            assert_eq!(
                state.validator_registry.len(),
                num_validators,
                "State validator count != expected."
            );
            info!("OK: num_validators = {}.", num_validators);
        }

        // Check for slashed validators.
        if let Some(ref slashed_validators) = self.slashed_validators {
            let actually_slashed_validators: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.slashed {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actually_slashed_validators, *slashed_validators,
                "Slashed validators != expected."
            );
            info!("OK: slashed_validators = {:?}.", slashed_validators);
        }

        // Check for exited validators.
        if let Some(ref exited_validators) = self.exited_validators {
            let actually_exited_validators: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.is_exited_at(state_epoch) {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actually_exited_validators, *exited_validators,
                "Exited validators != expected."
            );
            info!("OK: exited_validators = {:?}.", exited_validators);
        }

        // Check for validators that have initiated exit.
        if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
            let actual: Vec<u64> = state
                .validator_registry
                .iter()
                .enumerate()
                .filter_map(|(i, validator)| {
                    if validator.initiated_exit {
                        Some(i as u64)
                    } else {
                        None
                    }
                })
                .collect();
            assert_eq!(
                actual, *exit_initiated_validators,
                "Exit initiated validators != expected."
            );
            info!(
                "OK: exit_initiated_validators = {:?}.",
                exit_initiated_validators
            );
        }

        // Check validator balances.
        if let Some(ref balances) = self.balances {
            for (index, comparison, expected) in balances {
                let actual = *state
                    .validator_balances
                    .get(*index as usize)
                    .expect("Balance check specifies unknown validator");

                let result = match comparison.as_ref() {
                    "eq" => actual == *expected,
                    _ => panic!("Unknown balance comparison (use `eq`)"),
                };
                assert!(
                    result,
                    format!(
                        "Validator balance for {}: {} !{} {}.",
                        index, actual, comparison, expected
                    )
                );
                info!("OK: validator balance for {:?}.", index);
            }
        }
    }
}

/// Parse the `balances` section of the YAML document.
fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
    let mut tuples = vec![];

    for exit in yaml["balances"].as_vec()? {
        let from =
            as_u64(exit, "validator_index").expect("Incomplete balance check (validator_index)");
        let comparison = exit["comparison"]
            .clone()
            .into_string()
            .expect("Incomplete balance check (comparison)");
        let balance = as_u64(exit, "balance").expect("Incomplete balance check (balance)");

        tuples.push((from, comparison, balance));
    }

    Some(tuples)
}
@ -0,0 +1,19 @@
use yaml_rust::Yaml;

pub fn as_usize(yaml: &Yaml, key: &str) -> Option<usize> {
    yaml[key].as_i64().and_then(|n| Some(n as usize))
}

pub fn as_u64(yaml: &Yaml, key: &str) -> Option<u64> {
    yaml[key].as_i64().and_then(|n| Some(n as u64))
}

pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option<Vec<u64>> {
    yaml[key].clone().into_vec().and_then(|vec| {
        Some(
            vec.iter()
                .map(|item| item.as_i64().unwrap() as u64)
                .collect(),
        )
    })
}
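For reference (not part of this diff), a minimal sketch of how these helpers read typed values out of a parsed YAML node, as used within the crate. The key names mirror the test_case `config` format.

```rust
use yaml_rust::YamlLoader;

// Parse a tiny YAML fragment and extract typed values with the helpers above.
let docs = YamlLoader::load_from_str("num_slots: 64\nskip_slots: [2, 3]").unwrap();
let yaml = &docs[0];

assert_eq!(as_u64(yaml, "num_slots"), Some(64));
assert_eq!(as_vec_u64(yaml, "skip_slots"), Some(vec![2, 3]));
assert_eq!(as_usize(yaml, "missing_key"), None); // absent keys yield None
```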
@ -80,8 +80,8 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconN
         let (block, _state) = self
             .beacon_chain
             .produce_block(randao_reveal.clone())
-            .ok_or_else(|| {
-                BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string())
+            .map_err(|e| {
+                BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e))
             })?;

         if block.slot == slot {
@ -9,7 +9,7 @@ use db::ClientDB;
 use fork_choice::ForkChoice;
 use slot_clock::SlotClock;
 use std::sync::Arc;
-use types::{PublicKey, Slot};
+use types::{Fork, PublicKey, Slot};

 /// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
 /// it.
@ -40,6 +40,10 @@ impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDu
             Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
         }
     }
+
+    fn fork(&self) -> Result<Fork, ProducerDutiesReaderError> {
+        Ok(self.beacon_chain.state.read().fork.clone())
+    }
 }

 impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {
@ -1,47 +1,36 @@
 use attester::Signer as AttesterSigner;
 use block_proposer::Signer as BlockProposerSigner;
-use std::sync::RwLock;
 use types::{Keypair, Signature};

 /// A test-only struct used to perform signing for a proposer or attester.
 pub struct LocalSigner {
     keypair: Keypair,
-    should_sign: RwLock<bool>,
 }

 impl LocalSigner {
     /// Produce a new TestSigner with signing enabled by default.
     pub fn new(keypair: Keypair) -> Self {
-        Self {
-            keypair,
-            should_sign: RwLock::new(true),
-        }
-    }
-
-    /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages
-    /// will be signed.
-    pub fn enable_signing(&self, enabled: bool) {
-        *self.should_sign.write().unwrap() = enabled;
+        Self { keypair }
     }

     /// Sign some message.
-    fn bls_sign(&self, message: &[u8]) -> Option<Signature> {
-        Some(Signature::new(message, &self.keypair.sk))
+    fn bls_sign(&self, message: &[u8], domain: u64) -> Option<Signature> {
+        Some(Signature::new(message, domain, &self.keypair.sk))
     }
 }

 impl BlockProposerSigner for LocalSigner {
-    fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
-        self.bls_sign(message)
+    fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
+        self.bls_sign(message, domain)
     }

-    fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
-        self.bls_sign(message)
+    fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
+        self.bls_sign(message, domain)
     }
 }

 impl AttesterSigner for LocalSigner {
-    fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature> {
-        self.bls_sign(message)
+    fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
+        self.bls_sign(message, domain)
     }
 }
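As a brief aside (not part of this diff), the signer changes above thread a domain value into every BLS signature. A minimal sketch of the new calling convention, mirroring `BeaconChainHarness::validator_sign` earlier in this commit:

```rust
use types::{Keypair, Signature};

let keypair = Keypair::random();
// In practice the domain is derived via `spec.get_domain(epoch, domain_type, &fork)`;
// 42 here is an arbitrary placeholder value.
let domain: u64 = 42;
let signature = Signature::new(b"some message", domain, &keypair.sk);
```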
@ -28,24 +28,28 @@ pub enum AttestationProduceError {
     PollError(AttestationPollError),
 }

+type TestingBlockProducer = BlockProducer<
+    TestingSlotClock,
+    DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    LocalSigner,
+>;
+
+type TestingAttester = Attester<
+    TestingSlotClock,
+    DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
+    LocalSigner,
+>;
+
 /// A `BlockProducer` and `Attester` which sign using a common keypair.
 ///
 /// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
 /// testing that the core proposer and attester logic is functioning. Also for supporting beacon
 /// chain tests.
 pub struct ValidatorHarness {
-    pub block_producer: BlockProducer<
-        TestingSlotClock,
-        DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        LocalSigner,
-    >,
-    pub attester: Attester<
-        TestingSlotClock,
-        DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
-        LocalSigner,
-    >,
+    pub block_producer: TestingBlockProducer,
+    pub attester: TestingAttester,
     pub spec: Arc<ChainSpec>,
     pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
     pub keypair: Keypair,
@ -29,15 +29,16 @@ fn it_can_produce_past_first_epoch_boundary() {

     debug!("Harness built, tests starting..");

-    let blocks = harness.spec.epoch_length * 2 + 1;
+    let blocks = harness.spec.slots_per_epoch * 2 + 1;

     for i in 0..blocks {
         harness.advance_chain_with_block();
         debug!("Produced block {}/{}.", i + 1, blocks);
     }

+    harness.run_fork_choice();
+
     let dump = harness.chain_dump().expect("Chain dump failed.");

     assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.

-    harness.dump_to_file("/tmp/chaindump.json".to_string(), &dump);
 }
@ -134,9 +134,9 @@ mod tests {
         let store = BeaconBlockStore::new(db.clone());

         let ssz = "definitly not a valid block".as_bytes();
-        let hash = &Hash256::from("some hash".as_bytes());
+        let hash = &Hash256::from([0xAA; 32]);

-        db.put(DB_COLUMN, hash, ssz).unwrap();
+        db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
         assert_eq!(
             store.block_at_slot(hash, Slot::from(42_u64)),
             Err(BeaconBlockAtSlotError::DBError(
@ -151,10 +151,10 @@ mod tests {
         let store = BeaconBlockStore::new(db.clone());

         let ssz = "some bytes".as_bytes();
-        let hash = &Hash256::from("some hash".as_bytes());
-        let other_hash = &Hash256::from("another hash".as_bytes());
+        let hash = &Hash256::from([0xAA; 32]);
+        let other_hash = &Hash256::from([0xBB; 32]);

-        db.put(DB_COLUMN, hash, ssz).unwrap();
+        db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
         assert_eq!(
             store.block_at_slot(other_hash, Slot::from(42_u64)),
             Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash))
@ -169,18 +169,15 @@ mod tests {
         let thread_count = 10;
         let write_count = 10;

-        // We're expecting the product of these numbers to fit in one byte.
-        assert!(thread_count * write_count <= 255);
-
         let mut handles = vec![];
         for t in 0..thread_count {
             let wc = write_count;
             let bs = bs.clone();
             let handle = thread::spawn(move || {
                 for w in 0..wc {
-                    let key = (t * w) as u8;
+                    let key = t * w;
                     let val = 42;
-                    bs.put(&[key][..].into(), &vec![val]).unwrap();
+                    bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap();
                 }
             });
             handles.push(handle);
@ -192,9 +189,9 @@ mod tests {

         for t in 0..thread_count {
             for w in 0..write_count {
-                let key = (t * w) as u8;
+                let key = t * w;
-                assert!(bs.exists(&[key][..].into()).unwrap());
+                assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap());
-                let val = bs.get(&[key][..].into()).unwrap().unwrap();
+                let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap();
                 assert_eq!(vec![42], val);
             }
         }
@ -208,19 +205,20 @@ mod tests {

         // Specify test block parameters.
-        let hashes = [
-            Hash256::from(&[0; 32][..]),
-            Hash256::from(&[1; 32][..]),
-            Hash256::from(&[2; 32][..]),
-            Hash256::from(&[3; 32][..]),
-            Hash256::from(&[4; 32][..]),
-        ];
-        let parent_hashes = [
-            Hash256::from(&[255; 32][..]), // Genesis block.
-            Hash256::from(&[0; 32][..]),
-            Hash256::from(&[1; 32][..]),
-            Hash256::from(&[2; 32][..]),
-            Hash256::from(&[3; 32][..]),
-        ];
+        let hashes = [
+            Hash256::from([0; 32]),
+            Hash256::from([1; 32]),
+            Hash256::from([2; 32]),
+            Hash256::from([3; 32]),
+            Hash256::from([4; 32]),
+        ];
+        let parent_hashes = [
+            Hash256::from([255; 32]), // Genesis block.
+            Hash256::from([0; 32]),
+            Hash256::from([1; 32]),
+            Hash256::from([2; 32]),
+            Hash256::from([3; 32]),
+        ];
+        let unknown_hash = Hash256::from([101; 32]); // different from all above
         let slots: Vec<Slot> = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect();

         // Generate a vec of random blocks and store them in the DB.
@ -233,7 +231,7 @@ mod tests {
             block.slot = slots[i];

             let ssz = ssz_encode(&block);
-            db.put(DB_COLUMN, &hashes[i], &ssz).unwrap();
+            db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap();

             blocks.push(block);
         }
@ -255,11 +253,10 @@ mod tests {
         let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap();
         assert_eq!(ssz, None);

-        let bad_hash = &Hash256::from("unknown".as_bytes());
-        let ssz = bs.block_at_slot(bad_hash, Slot::new(2));
+        let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2));
         assert_eq!(
             ssz,
-            Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash))
+            Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash))
         );
     }
 }
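As a brief aside (not part of this diff), the test changes above and below migrate away from building a `Hash256` out of an arbitrary string slice. A minimal sketch of the fixed-width constructors now relied on, assuming the `Hash256` re-export behaves like `ethereum_types::H256`:

```rust
use types::Hash256;

let a = Hash256::from([0xAA; 32]);        // fixed 32-byte array literal
let b = Hash256::from_low_u64_le(7);      // 7 placed in the low bytes, little-endian
let c = Hash256::from_slice(&[0xBB; 32]); // panics if the slice is not exactly 32 bytes

assert_eq!(a.as_bytes().len(), 32);       // raw bytes, as passed to `db.put`
assert_ne!(b, c);
```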
@ -2,25 +2,25 @@ macro_rules! impl_crud_for_store {
     ($store: ident, $db_column: expr) => {
         impl<T: ClientDB> $store<T> {
             pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> {
-                self.db.put($db_column, hash, ssz)
+                self.db.put($db_column, hash.as_bytes(), ssz)
             }

             pub fn get(&self, hash: &Hash256) -> Result<Option<Vec<u8>>, DBError> {
-                self.db.get($db_column, hash)
+                self.db.get($db_column, hash.as_bytes())
             }

             pub fn exists(&self, hash: &Hash256) -> Result<bool, DBError> {
-                self.db.exists($db_column, hash)
+                self.db.exists($db_column, hash.as_bytes())
             }

             pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> {
-                self.db.delete($db_column, hash)
+                self.db.delete($db_column, hash.as_bytes())
             }
         }
     };
 }

-#[allow(unused_macros)]
+#[cfg(test)]
 macro_rules! test_crud_for_store {
     ($store: ident, $db_column: expr) => {
         #[test]
@ -29,10 +29,10 @@ macro_rules! test_crud_for_store {
             let store = $store::new(db.clone());

             let ssz = "some bytes".as_bytes();
-            let hash = &Hash256::from("some hash".as_bytes());
+            let hash = &Hash256::from([0xAA; 32]);

             store.put(hash, ssz).unwrap();
-            assert_eq!(db.get(DB_COLUMN, hash).unwrap().unwrap(), ssz);
+            assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz);
         }

         #[test]
@ -41,9 +41,9 @@ macro_rules! test_crud_for_store {
             let store = $store::new(db.clone());

             let ssz = "some bytes".as_bytes();
-            let hash = &Hash256::from("some hash".as_bytes());
+            let hash = &Hash256::from([0xAA; 32]);

-            db.put(DB_COLUMN, hash, ssz).unwrap();
+            db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
             assert_eq!(store.get(hash).unwrap().unwrap(), ssz);
         }

@ -53,10 +53,10 @@ macro_rules! test_crud_for_store {
             let store = $store::new(db.clone());

             let ssz = "some bytes".as_bytes();
-            let hash = &Hash256::from("some hash".as_bytes());
-            let other_hash = &Hash256::from("another hash".as_bytes());
+            let hash = &Hash256::from([0xAA; 32]);
+            let other_hash = &Hash256::from([0xBB; 32]);

-            db.put(DB_COLUMN, other_hash, ssz).unwrap();
+            db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap();
             assert_eq!(store.get(hash).unwrap(), None);
         }

@ -66,9 +66,9 @@ macro_rules! test_crud_for_store {
             let store = $store::new(db.clone());

             let ssz = "some bytes".as_bytes();
-            let hash = &Hash256::from("some hash".as_bytes());
+            let hash = &Hash256::from([0xAA; 32]);

-            db.put(DB_COLUMN, hash, ssz).unwrap();
+            db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
             assert!(store.exists(hash).unwrap());
         }

@ -78,10 +78,10 @@ macro_rules! test_crud_for_store {
             let store = $store::new(db.clone());
|
let store = $store::new(db.clone());
|
||||||
|
|
||||||
let ssz = "some bytes".as_bytes();
|
let ssz = "some bytes".as_bytes();
|
||||||
let hash = &Hash256::from("some hash".as_bytes());
|
let hash = &Hash256::from([0xAA; 32]);
|
||||||
let other_hash = &Hash256::from("another hash".as_bytes());
|
let other_hash = &Hash256::from([0xBB; 32]);
|
||||||
|
|
||||||
db.put(DB_COLUMN, hash, ssz).unwrap();
|
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
|
||||||
assert!(!store.exists(other_hash).unwrap());
|
assert!(!store.exists(other_hash).unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -91,13 +91,13 @@ macro_rules! test_crud_for_store {
|
|||||||
let store = $store::new(db.clone());
|
let store = $store::new(db.clone());
|
||||||
|
|
||||||
let ssz = "some bytes".as_bytes();
|
let ssz = "some bytes".as_bytes();
|
||||||
let hash = &Hash256::from("some hash".as_bytes());
|
let hash = &Hash256::from([0xAA; 32]);
|
||||||
|
|
||||||
db.put(DB_COLUMN, hash, ssz).unwrap();
|
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
|
||||||
assert!(db.exists(DB_COLUMN, hash).unwrap());
|
assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
|
||||||
|
|
||||||
store.delete(hash).unwrap();
|
store.delete(hash).unwrap();
|
||||||
assert!(!db.exists(DB_COLUMN, hash).unwrap());
|
assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
@ -37,7 +37,7 @@ mod tests {
|
|||||||
let db = Arc::new(MemoryDB::open());
|
let db = Arc::new(MemoryDB::open());
|
||||||
let store = PoWChainStore::new(db.clone());
|
let store = PoWChainStore::new(db.clone());
|
||||||
|
|
||||||
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
|
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
|
||||||
store.put_block_hash(hash).unwrap();
|
store.put_block_hash(hash).unwrap();
|
||||||
|
|
||||||
assert!(db.exists(DB_COLUMN, hash).unwrap());
|
assert!(db.exists(DB_COLUMN, hash).unwrap());
|
||||||
@ -48,7 +48,7 @@ mod tests {
|
|||||||
let db = Arc::new(MemoryDB::open());
|
let db = Arc::new(MemoryDB::open());
|
||||||
let store = PoWChainStore::new(db.clone());
|
let store = PoWChainStore::new(db.clone());
|
||||||
|
|
||||||
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
|
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
|
||||||
db.put(DB_COLUMN, hash, &[0]).unwrap();
|
db.put(DB_COLUMN, hash, &[0]).unwrap();
|
||||||
|
|
||||||
assert!(store.block_hash_exists(hash).unwrap());
|
assert!(store.block_hash_exists(hash).unwrap());
|
||||||
@ -59,8 +59,8 @@ mod tests {
|
|||||||
let db = Arc::new(MemoryDB::open());
|
let db = Arc::new(MemoryDB::open());
|
||||||
let store = PoWChainStore::new(db.clone());
|
let store = PoWChainStore::new(db.clone());
|
||||||
|
|
||||||
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
|
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
|
||||||
let other_hash = &Hash256::from("another hash".as_bytes()).to_vec();
|
let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec();
|
||||||
db.put(DB_COLUMN, hash, &[0]).unwrap();
|
db.put(DB_COLUMN, hash, &[0]).unwrap();
|
||||||
|
|
||||||
assert!(!store.block_hash_exists(other_hash).unwrap());
|
assert!(!store.block_hash_exists(other_hash).unwrap());
|
||||||
|
@ -78,7 +78,7 @@ fn main() {
|
|||||||
|
|
||||||
// Slot clock
|
// Slot clock
|
||||||
let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past).
|
let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past).
|
||||||
let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
|
let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot)
|
||||||
.expect("Unable to load SystemTimeSlotClock");
|
.expect("Unable to load SystemTimeSlotClock");
|
||||||
// Choose the fork choice
|
// Choose the fork choice
|
||||||
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
|
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
|
||||||
|
@ -67,7 +67,7 @@ into individual crates wherever possible.
|
|||||||
|
|
||||||
Generally, tests can be kept in the same file, as is typical in Rust.
|
Generally, tests can be kept in the same file, as is typical in Rust.
|
||||||
Integration tests should be placed in the `tests` directory in the crate's
|
Integration tests should be placed in the `tests` directory in the crate's
|
||||||
root. Particularity large (line-count) tests should be placed into a separate
|
root. Particularly large (line-count) tests should be placed into a separate
|
||||||
file.
|
file.
|
||||||
|
|
||||||
A function is not considered complete until a test exists for it. We produce
|
A function is not considered complete until a test exists for it. We produce
|
||||||
|
@ -122,7 +122,7 @@ project.
|
|||||||
* **Module**: A collection of items: functions, structs, traits, and even other
|
* **Module**: A collection of items: functions, structs, traits, and even other
|
||||||
modules. Modules allow you to hierarchically split code into logical units
|
modules. Modules allow you to hierarchically split code into logical units
|
||||||
and manage visibility.
|
and manage visibility.
|
||||||
* **Attribute**: Metadaata applied to some module, crate or item.
|
* **Attribute**: Metadata applied to some module, crate or item.
|
||||||
* **Macros**: Macros are powerful meta-programming statements that get expanded
|
* **Macros**: Macros are powerful meta-programming statements that get expanded
|
||||||
into source code that gets compiled with the rest of the code (Unlike `C`
|
into source code that gets compiled with the rest of the code (Unlike `C`
|
||||||
macros that are pre-processed, Rust macros form an Abstract Syntax Tree).
|
macros that are pre-processed, Rust macros form an Abstract Syntax Tree).
|
||||||
@ -185,7 +185,7 @@ check your code.
|
|||||||
| Function / Method | ``snake_case`` |
|
| Function / Method | ``snake_case`` |
|
||||||
| Macro Names | ``snake_case`` |
|
| Macro Names | ``snake_case`` |
|
||||||
| Constants | ``SCREAMING_SNAKE_CASE`` |
|
| Constants | ``SCREAMING_SNAKE_CASE`` |
|
||||||
| Forbidden name | Trialing Underscore: ``name_`` |
|
| Forbidden name | Trailing Underscore: ``name_`` |
|
||||||
|
|
||||||
Other general rust docs:
|
Other general rust docs:
|
||||||
|
|
||||||
|
37
eth2/README.md
Normal file
37
eth2/README.md
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
# Ethereum 2.0 Common Crates
|
||||||
|
|
||||||
|
Rust crates containing logic common across the Lighthouse project.
|
||||||
|
|
||||||
|
## Per-Crate Summary
|
||||||
|
|
||||||
|
- [`attester/`](attester/): Core logic for attesting to beacon and shard blocks.
|
||||||
|
- [`block_proposer/`](block_proposer/): Core logic for proposing beacon blocks.
|
||||||
|
- [`fork_choice/`](fork_choice/): A collection of fork-choice algorithms for
|
||||||
|
the Beacon Chain.
|
||||||
|
- [`state_processing/`](state_processing/): Provides per-slot, per-block, and
|
||||||
|
per-epoch state processing.
|
||||||
|
- [`types/`](types/): Defines base Ethereum 2.0 types (e.g., `BeaconBlock`,
|
||||||
|
`BeaconState`, etc).
|
||||||
|
- [`utils/`](utils/):
|
||||||
|
- [`bls`](utils/bls/): A wrapper for an external BLS encryption library.
|
||||||
|
- [`boolean-bitfield`](utils/boolean-bitfield/): Provides an expandable vector
|
||||||
|
of bools, specifically for use in Eth2.
|
||||||
|
- [`fisher-yates-shuffle`](utils/fisher-yates-shuffle/): shuffles a list
|
||||||
|
pseudo-randomly.
|
||||||
|
- [`hashing`](utils/hashing/): A wrapper for external hashing libraries.
|
||||||
|
- [`honey-badger-split`](utils/honey-badger-split/): Splits a list in `n`
|
||||||
|
parts without giving AF about the length of the list, `n`, or anything
|
||||||
|
else.
|
||||||
|
- [`int-to-bytes`](utils/int-to-bytes/): Simple library which converts ints
|
||||||
|
into byte-strings of various lengths.
|
||||||
|
- [`slot_clock`](utils/slot_clock/): translates the system time into Beacon
|
||||||
|
Chain "slots". (Also provides another slot clock that's useful during
|
||||||
|
testing.)
|
||||||
|
- [`ssz`](utils/ssz/): an implementation of the SimpleSerialize
|
||||||
|
serialization/deserialization protocol used by Eth 2.0.
|
||||||
|
- [`ssz_derive`](utils/ssz_derive/): provides procedural macros for
|
||||||
|
deriving SSZ `Encodable`, `Decodable`, and `TreeHash` methods.
|
||||||
|
- [`swap_or_not_shuffle`](utils/swap_or_not_shuffle/): a list-shuffling
|
||||||
|
method which is slow, but allows for a subset of indices to be shuffled.
|
||||||
|
- [`test_random_derive`](utils/test_random_derive/): provides procedural
|
||||||
|
macros for deriving the `TestRandom` trait defined in `types`.
|
@ -2,14 +2,16 @@ pub mod test_utils;
|
|||||||
mod traits;
|
mod traits;
|
||||||
|
|
||||||
use slot_clock::SlotClock;
|
use slot_clock::SlotClock;
|
||||||
|
use ssz::TreeHash;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{AttestationData, FreeAttestation, Signature, Slot};
|
use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot};
|
||||||
|
|
||||||
pub use self::traits::{
|
pub use self::traits::{
|
||||||
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
||||||
};
|
};
|
||||||
|
|
||||||
const PHASE_0_CUSTODY_BIT: bool = false;
|
const PHASE_0_CUSTODY_BIT: bool = false;
|
||||||
|
const DOMAIN_ATTESTATION: u64 = 1;
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub enum PollOutcome {
|
pub enum PollOutcome {
|
||||||
@ -136,8 +138,14 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V,
|
|||||||
fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
|
fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
|
||||||
self.store_produce(attestation_data);
|
self.store_produce(attestation_data);
|
||||||
|
|
||||||
|
let message = AttestationDataAndCustodyBit {
|
||||||
|
data: attestation_data.clone(),
|
||||||
|
custody_bit: PHASE_0_CUSTODY_BIT,
|
||||||
|
}
|
||||||
|
.hash_tree_root();
|
||||||
|
|
||||||
self.signer
|
self.signer
|
||||||
.sign_attestation_message(&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..])
|
.sign_attestation_message(&message[..], DOMAIN_ATTESTATION)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns `true` if signing some attestation_data is safe (non-slashable).
|
/// Returns `true` if signing some attestation_data is safe (non-slashable).
|
||||||
@ -192,9 +200,9 @@ mod tests {
|
|||||||
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
||||||
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
||||||
|
|
||||||
let mut duties = EpochMap::new(spec.epoch_length);
|
let mut duties = EpochMap::new(spec.slots_per_epoch);
|
||||||
let attest_slot = Slot::new(100);
|
let attest_slot = Slot::new(100);
|
||||||
let attest_epoch = attest_slot / spec.epoch_length;
|
let attest_epoch = attest_slot / spec.slots_per_epoch;
|
||||||
let attest_shard = 12;
|
let attest_shard = 12;
|
||||||
duties.insert_attestation_shard(attest_slot, attest_shard);
|
duties.insert_attestation_shard(attest_slot, attest_shard);
|
||||||
duties.set_validator_index(Some(2));
|
duties.set_validator_index(Some(2));
|
||||||
@ -240,7 +248,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// In an epoch without known duties...
|
// In an epoch without known duties...
|
||||||
let slot = (attest_epoch + 1) * spec.epoch_length;
|
let slot = (attest_epoch + 1) * spec.slots_per_epoch;
|
||||||
slot_clock.set_slot(slot.into());
|
slot_clock.set_slot(slot.into());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
attester.poll(),
|
attester.poll(),
|
||||||
|
@ -3,22 +3,22 @@ use std::collections::HashMap;
|
|||||||
use types::{Epoch, Slot};
|
use types::{Epoch, Slot};
|
||||||
|
|
||||||
pub struct EpochMap {
|
pub struct EpochMap {
|
||||||
epoch_length: u64,
|
slots_per_epoch: u64,
|
||||||
validator_index: Option<u64>,
|
validator_index: Option<u64>,
|
||||||
map: HashMap<Epoch, (Slot, u64)>,
|
map: HashMap<Epoch, (Slot, u64)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EpochMap {
|
impl EpochMap {
|
||||||
pub fn new(epoch_length: u64) -> Self {
|
pub fn new(slots_per_epoch: u64) -> Self {
|
||||||
Self {
|
Self {
|
||||||
epoch_length,
|
slots_per_epoch,
|
||||||
validator_index: None,
|
validator_index: None,
|
||||||
map: HashMap::new(),
|
map: HashMap::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) {
|
pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) {
|
||||||
let epoch = slot.epoch(self.epoch_length);
|
let epoch = slot.epoch(self.slots_per_epoch);
|
||||||
self.map.insert(epoch, (slot, shard));
|
self.map.insert(epoch, (slot, shard));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -29,7 +29,7 @@ impl EpochMap {
|
|||||||
|
|
||||||
impl DutiesReader for EpochMap {
|
impl DutiesReader for EpochMap {
|
||||||
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError> {
|
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError> {
|
||||||
let epoch = slot.epoch(self.epoch_length);
|
let epoch = slot.epoch(self.slots_per_epoch);
|
||||||
|
|
||||||
match self.map.get(&epoch) {
|
match self.map.get(&epoch) {
|
||||||
Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)),
|
Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)),
|
||||||
|
@ -25,7 +25,7 @@ impl LocalSigner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Signer for LocalSigner {
|
impl Signer for LocalSigner {
|
||||||
fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature> {
|
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
||||||
Some(Signature::new(message, &self.keypair.sk))
|
Some(Signature::new(message, domain, &self.keypair.sk))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -45,5 +45,5 @@ pub trait DutiesReader: Send + Sync {
|
|||||||
|
|
||||||
/// Signs message using an internally-maintained private key.
|
/// Signs message using an internally-maintained private key.
|
||||||
pub trait Signer {
|
pub trait Signer {
|
||||||
fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature>;
|
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
||||||
}
|
}
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
pub mod test_utils;
|
pub mod test_utils;
|
||||||
mod traits;
|
mod traits;
|
||||||
|
|
||||||
use int_to_bytes::int_to_bytes32;
|
|
||||||
use slot_clock::SlotClock;
|
use slot_clock::SlotClock;
|
||||||
|
use ssz::{SignedRoot, TreeHash};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconBlock, ChainSpec, Slot};
|
use types::{BeaconBlock, ChainSpec, Domain, Hash256, Proposal, Slot};
|
||||||
|
|
||||||
pub use self::traits::{
|
pub use self::traits::{
|
||||||
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
||||||
@ -28,6 +28,8 @@ pub enum PollOutcome {
|
|||||||
SignerRejection(Slot),
|
SignerRejection(Slot),
|
||||||
/// The public key for this validator is not an active validator.
|
/// The public key for this validator is not an active validator.
|
||||||
ValidatorIsUnknown(Slot),
|
ValidatorIsUnknown(Slot),
|
||||||
|
/// Unable to determine a `Fork` for signature domain generation.
|
||||||
|
UnableToGetFork(Slot),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
@ -130,11 +132,20 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
|
|||||||
/// The slash-protection code is not yet implemented. There is zero protection against
|
/// The slash-protection code is not yet implemented. There is zero protection against
|
||||||
/// slashing.
|
/// slashing.
|
||||||
fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
|
fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
|
||||||
|
let fork = match self.epoch_map.fork() {
|
||||||
|
Ok(fork) => fork,
|
||||||
|
Err(_) => return Ok(PollOutcome::UnableToGetFork(slot)),
|
||||||
|
};
|
||||||
|
|
||||||
let randao_reveal = {
|
let randao_reveal = {
|
||||||
// TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
|
// TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
|
||||||
let message = int_to_bytes32(slot.epoch(self.spec.epoch_length).as_u64());
|
let message = slot.epoch(self.spec.slots_per_epoch).hash_tree_root();
|
||||||
|
|
||||||
match self.signer.sign_randao_reveal(&message) {
|
match self.signer.sign_randao_reveal(
|
||||||
|
&message,
|
||||||
|
self.spec
|
||||||
|
.get_domain(slot.epoch(self.spec.slots_per_epoch), Domain::Randao, &fork),
|
||||||
|
) {
|
||||||
None => return Ok(PollOutcome::SignerRejection(slot)),
|
None => return Ok(PollOutcome::SignerRejection(slot)),
|
||||||
Some(signature) => signature,
|
Some(signature) => signature,
|
||||||
}
|
}
|
||||||
@ -145,7 +156,12 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
|
|||||||
.produce_beacon_block(slot, &randao_reveal)?
|
.produce_beacon_block(slot, &randao_reveal)?
|
||||||
{
|
{
|
||||||
if self.safe_to_produce(&block) {
|
if self.safe_to_produce(&block) {
|
||||||
if let Some(block) = self.sign_block(block) {
|
let domain = self.spec.get_domain(
|
||||||
|
slot.epoch(self.spec.slots_per_epoch),
|
||||||
|
Domain::Proposal,
|
||||||
|
&fork,
|
||||||
|
);
|
||||||
|
if let Some(block) = self.sign_block(block, domain) {
|
||||||
self.beacon_node.publish_beacon_block(block)?;
|
self.beacon_node.publish_beacon_block(block)?;
|
||||||
Ok(PollOutcome::BlockProduced(slot))
|
Ok(PollOutcome::BlockProduced(slot))
|
||||||
} else {
|
} else {
|
||||||
@ -163,12 +179,19 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
|
|||||||
///
|
///
|
||||||
/// Important: this function will not check to ensure the block is not slashable. This must be
|
/// Important: this function will not check to ensure the block is not slashable. This must be
|
||||||
/// done upstream.
|
/// done upstream.
|
||||||
fn sign_block(&mut self, mut block: BeaconBlock) -> Option<BeaconBlock> {
|
fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
|
||||||
self.store_produce(&block);
|
self.store_produce(&block);
|
||||||
|
|
||||||
|
let proposal = Proposal {
|
||||||
|
slot: block.slot,
|
||||||
|
shard: self.spec.beacon_chain_shard_number,
|
||||||
|
block_root: Hash256::from_slice(&block.signed_root()[..]),
|
||||||
|
signature: block.signature.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
match self
|
match self
|
||||||
.signer
|
.signer
|
||||||
.sign_block_proposal(&block.proposal_root(&self.spec)[..])
|
.sign_block_proposal(&proposal.signed_root()[..], domain)
|
||||||
{
|
{
|
||||||
None => None,
|
None => None,
|
||||||
Some(signature) => {
|
Some(signature) => {
|
||||||
@ -230,9 +253,9 @@ mod tests {
|
|||||||
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
||||||
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
||||||
|
|
||||||
let mut epoch_map = EpochMap::new(spec.epoch_length);
|
let mut epoch_map = EpochMap::new(spec.slots_per_epoch);
|
||||||
let produce_slot = Slot::new(100);
|
let produce_slot = Slot::new(100);
|
||||||
let produce_epoch = produce_slot.epoch(spec.epoch_length);
|
let produce_epoch = produce_slot.epoch(spec.slots_per_epoch);
|
||||||
epoch_map.map.insert(produce_epoch, produce_slot);
|
epoch_map.map.insert(produce_epoch, produce_slot);
|
||||||
let epoch_map = Arc::new(epoch_map);
|
let epoch_map = Arc::new(epoch_map);
|
||||||
|
|
||||||
@ -277,7 +300,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// In an epoch without known duties...
|
// In an epoch without known duties...
|
||||||
let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length;
|
let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch;
|
||||||
slot_clock.set_slot(slot);
|
slot_clock.set_slot(slot);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
block_proposer.poll(),
|
block_proposer.poll(),
|
||||||
|
@ -1,16 +1,16 @@
|
|||||||
use crate::{DutiesReader, DutiesReaderError};
|
use crate::{DutiesReader, DutiesReaderError};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use types::{Epoch, Slot};
|
use types::{Epoch, Fork, Slot};
|
||||||
|
|
||||||
pub struct EpochMap {
|
pub struct EpochMap {
|
||||||
epoch_length: u64,
|
slots_per_epoch: u64,
|
||||||
pub map: HashMap<Epoch, Slot>,
|
pub map: HashMap<Epoch, Slot>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EpochMap {
|
impl EpochMap {
|
||||||
pub fn new(epoch_length: u64) -> Self {
|
pub fn new(slots_per_epoch: u64) -> Self {
|
||||||
Self {
|
Self {
|
||||||
epoch_length,
|
slots_per_epoch,
|
||||||
map: HashMap::new(),
|
map: HashMap::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -18,11 +18,19 @@ impl EpochMap {
|
|||||||
|
|
||||||
impl DutiesReader for EpochMap {
|
impl DutiesReader for EpochMap {
|
||||||
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
|
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
|
||||||
let epoch = slot.epoch(self.epoch_length);
|
let epoch = slot.epoch(self.slots_per_epoch);
|
||||||
match self.map.get(&epoch) {
|
match self.map.get(&epoch) {
|
||||||
Some(s) if *s == slot => Ok(true),
|
Some(s) if *s == slot => Ok(true),
|
||||||
Some(s) if *s != slot => Ok(false),
|
Some(s) if *s != slot => Ok(false),
|
||||||
_ => Err(DutiesReaderError::UnknownEpoch),
|
_ => Err(DutiesReaderError::UnknownEpoch),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn fork(&self) -> Result<Fork, DutiesReaderError> {
|
||||||
|
Ok(Fork {
|
||||||
|
previous_version: 0,
|
||||||
|
current_version: 0,
|
||||||
|
epoch: Epoch::new(0),
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -25,11 +25,11 @@ impl LocalSigner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Signer for LocalSigner {
|
impl Signer for LocalSigner {
|
||||||
fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
|
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
||||||
Some(Signature::new(message, &self.keypair.sk))
|
Some(Signature::new(message, domain, &self.keypair.sk))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
|
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
||||||
Some(Signature::new(message, &self.keypair.sk))
|
Some(Signature::new(message, domain, &self.keypair.sk))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
use types::{BeaconBlock, Signature, Slot};
|
use types::{BeaconBlock, Fork, Signature, Slot};
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub enum BeaconNodeError {
|
pub enum BeaconNodeError {
|
||||||
@ -40,10 +40,11 @@ pub enum DutiesReaderError {
|
|||||||
/// Informs a validator of their duties (e.g., block production).
|
/// Informs a validator of their duties (e.g., block production).
|
||||||
pub trait DutiesReader: Send + Sync {
|
pub trait DutiesReader: Send + Sync {
|
||||||
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
|
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
|
||||||
|
fn fork(&self) -> Result<Fork, DutiesReaderError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Signs message using an internally-maintained private key.
|
/// Signs message using an internally-maintained private key.
|
||||||
pub trait Signer {
|
pub trait Signer {
|
||||||
fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature>;
|
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
||||||
fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature>;
|
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
||||||
}
|
}
|
||||||
|
@ -8,7 +8,6 @@ edition = "2018"
|
|||||||
db = { path = "../../beacon_node/db" }
|
db = { path = "../../beacon_node/db" }
|
||||||
ssz = { path = "../utils/ssz" }
|
ssz = { path = "../utils/ssz" }
|
||||||
types = { path = "../types" }
|
types = { path = "../types" }
|
||||||
fast-math = "0.1.1"
|
|
||||||
log = "0.4.6"
|
log = "0.4.6"
|
||||||
bit-vec = "0.5.0"
|
bit-vec = "0.5.0"
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
|
//! The optimised bitwise LMD-GHOST fork choice rule.
|
||||||
extern crate bit_vec;
|
extern crate bit_vec;
|
||||||
extern crate fast_math;
|
|
||||||
|
|
||||||
use crate::{ForkChoice, ForkChoiceError};
|
use crate::{ForkChoice, ForkChoiceError};
|
||||||
use bit_vec::BitVec;
|
use bit_vec::BitVec;
|
||||||
@ -7,7 +7,6 @@ use db::{
|
|||||||
stores::{BeaconBlockStore, BeaconStateStore},
|
stores::{BeaconBlockStore, BeaconStateStore},
|
||||||
ClientDB,
|
ClientDB,
|
||||||
};
|
};
|
||||||
use fast_math::log2_raw;
|
|
||||||
use log::{debug, trace};
|
use log::{debug, trace};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@ -19,33 +18,28 @@ use types::{
|
|||||||
//TODO: Pruning - Children
|
//TODO: Pruning - Children
|
||||||
//TODO: Handle Syncing
|
//TODO: Handle Syncing
|
||||||
|
|
||||||
/// The optimised bitwise LMD-GHOST fork choice rule.
|
// NOTE: This uses u32 to represent difference between block heights. Thus this is only
|
||||||
/// NOTE: This uses u32 to represent difference between block heights. Thus this is only
|
// applicable for block height differences in the range of a u32.
|
||||||
/// applicable for block height differences in the range of a u32.
|
// This can potentially be parallelized in some parts.
|
||||||
/// This can potentially be parallelized in some parts.
|
|
||||||
// we use fast log2, a log2 lookup table is implemented in Vitaliks code, potentially do
|
/// Compute the base-2 logarithm of an integer, floored (rounded down)
|
||||||
// the comparison. Log2_raw takes 2ns according to the documentation.
|
|
||||||
#[inline]
|
#[inline]
|
||||||
fn log2_int(x: u32) -> u32 {
|
fn log2_int(x: u64) -> u32 {
|
||||||
if x == 0 {
|
if x == 0 {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
assert!(
|
63 - x.leading_zeros()
|
||||||
x <= std::f32::MAX as u32,
|
|
||||||
"Height too large for fast log in bitwise fork choice"
|
|
||||||
);
|
|
||||||
log2_raw(x as f32) as u32
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn power_of_2_below(x: u32) -> u32 {
|
fn power_of_2_below(x: u64) -> u64 {
|
||||||
2u32.pow(log2_int(x))
|
2u64.pow(log2_int(x))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm.
|
/// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm.
|
||||||
pub struct BitwiseLMDGhost<T: ClientDB + Sized> {
|
pub struct BitwiseLMDGhost<T: ClientDB + Sized> {
|
||||||
/// A cache of known ancestors at given heights for a specific block.
|
/// A cache of known ancestors at given heights for a specific block.
|
||||||
//TODO: Consider FnvHashMap
|
//TODO: Consider FnvHashMap
|
||||||
cache: HashMap<CacheKey<u32>, Hash256>,
|
cache: HashMap<CacheKey<u64>, Hash256>,
|
||||||
/// Log lookup table for blocks to their ancestors.
|
/// Log lookup table for blocks to their ancestors.
|
||||||
//TODO: Verify we only want/need a size 16 log lookup
|
//TODO: Verify we only want/need a size 16 log lookup
|
||||||
ancestors: Vec<HashMap<Hash256, Hash256>>,
|
ancestors: Vec<HashMap<Hash256, Hash256>>,
|
||||||
@ -101,7 +95,7 @@ where
|
|||||||
|
|
||||||
let active_validator_indices = get_active_validator_indices(
|
let active_validator_indices = get_active_validator_indices(
|
||||||
¤t_state.validator_registry[..],
|
¤t_state.validator_registry[..],
|
||||||
block_slot.epoch(spec.epoch_length),
|
block_slot.epoch(spec.slots_per_epoch),
|
||||||
);
|
);
|
||||||
|
|
||||||
for index in active_validator_indices {
|
for index in active_validator_indices {
|
||||||
@ -147,7 +141,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check if the result is stored in our cache
|
// check if the result is stored in our cache
|
||||||
let cache_key = CacheKey::new(&block_hash, target_height.as_u32());
|
let cache_key = CacheKey::new(&block_hash, target_height.as_u64());
|
||||||
if let Some(ancestor) = self.cache.get(&cache_key) {
|
if let Some(ancestor) = self.cache.get(&cache_key) {
|
||||||
return Some(*ancestor);
|
return Some(*ancestor);
|
||||||
}
|
}
|
||||||
@ -155,7 +149,7 @@ where
|
|||||||
// not in the cache recursively search for ancestors using a log-lookup
|
// not in the cache recursively search for ancestors using a log-lookup
|
||||||
if let Some(ancestor) = {
|
if let Some(ancestor) = {
|
||||||
let ancestor_lookup = self.ancestors
|
let ancestor_lookup = self.ancestors
|
||||||
[log2_int((block_height - target_height - 1u64).as_u32()) as usize]
|
[log2_int((block_height - target_height - 1u64).as_u64()) as usize]
|
||||||
.get(&block_hash)
|
.get(&block_hash)
|
||||||
//TODO: Panic if we can't lookup and fork choice fails
|
//TODO: Panic if we can't lookup and fork choice fails
|
||||||
.expect("All blocks should be added to the ancestor log lookup table");
|
.expect("All blocks should be added to the ancestor log lookup table");
|
||||||
@ -192,7 +186,7 @@ where
|
|||||||
}
|
}
|
||||||
// Check if there is a clear block winner at this height. If so return it.
|
// Check if there is a clear block winner at this height. If so return it.
|
||||||
for (hash, votes) in current_votes.iter() {
|
for (hash, votes) in current_votes.iter() {
|
||||||
if *votes >= total_vote_count / 2 {
|
if *votes > total_vote_count / 2 {
|
||||||
// we have a clear winner, return it
|
// we have a clear winner, return it
|
||||||
return Some(*hash);
|
return Some(*hash);
|
||||||
}
|
}
|
||||||
@ -216,7 +210,7 @@ where
|
|||||||
|
|
||||||
trace!("Child vote length: {}", votes.len());
|
trace!("Child vote length: {}", votes.len());
|
||||||
for (candidate, votes) in votes.iter() {
|
for (candidate, votes) in votes.iter() {
|
||||||
let candidate_bit: BitVec = BitVec::from_bytes(&candidate);
|
let candidate_bit: BitVec = BitVec::from_bytes(candidate.as_bytes());
|
||||||
|
|
||||||
// if the bitmasks don't match, exclude candidate
|
// if the bitmasks don't match, exclude candidate
|
||||||
if !bitmask.iter().eq(candidate_bit.iter().take(bit)) {
|
if !bitmask.iter().eq(candidate_bit.iter().take(bit)) {
|
||||||
@ -371,18 +365,21 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
|
|||||||
// if there are no children, we are done, return the current_head
|
// if there are no children, we are done, return the current_head
|
||||||
let children = match self.children.get(¤t_head) {
|
let children = match self.children.get(¤t_head) {
|
||||||
Some(children) => children.clone(),
|
Some(children) => children.clone(),
|
||||||
None => return Ok(current_head),
|
None => {
|
||||||
|
debug!("Head found: {}", current_head);
|
||||||
|
return Ok(current_head);
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// logarithmic lookup blocks to see if there are obvious winners, if so,
|
// logarithmic lookup blocks to see if there are obvious winners, if so,
|
||||||
// progress to the next iteration.
|
// progress to the next iteration.
|
||||||
let mut step =
|
let mut step =
|
||||||
power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u32()) / 2;
|
power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2;
|
||||||
while step > 0 {
|
while step > 0 {
|
||||||
trace!("Current Step: {}", step);
|
trace!("Current Step: {}", step);
|
||||||
if let Some(clear_winner) = self.get_clear_winner(
|
if let Some(clear_winner) = self.get_clear_winner(
|
||||||
&latest_votes,
|
&latest_votes,
|
||||||
block_height - (block_height % u64::from(step)) + u64::from(step),
|
block_height - (block_height % step) + step,
|
||||||
spec,
|
spec,
|
||||||
) {
|
) {
|
||||||
current_head = clear_winner;
|
current_head = clear_winner;
|
||||||
@ -391,7 +388,7 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
|
|||||||
step /= 2;
|
step /= 2;
|
||||||
}
|
}
|
||||||
if step > 0 {
|
if step > 0 {
|
||||||
trace!("Found clear winner in log lookup");
|
trace!("Found clear winner: {}", current_head);
|
||||||
}
|
}
|
||||||
// if our skip lookup failed and we only have one child, progress to that child
|
// if our skip lookup failed and we only have one child, progress to that child
|
||||||
else if children.len() == 1 {
|
else if children.len() == 1 {
|
||||||
@ -466,7 +463,6 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
pub fn test_power_of_2_below() {
|
pub fn test_power_of_2_below() {
|
||||||
println!("{:?}", std::f32::MAX);
|
|
||||||
assert_eq!(power_of_2_below(4), 4);
|
assert_eq!(power_of_2_below(4), 4);
|
||||||
assert_eq!(power_of_2_below(5), 4);
|
assert_eq!(power_of_2_below(5), 4);
|
||||||
assert_eq!(power_of_2_below(7), 4);
|
assert_eq!(power_of_2_below(7), 4);
|
||||||
@ -475,4 +471,12 @@ mod tests {
|
|||||||
assert_eq!(power_of_2_below(33), 32);
|
assert_eq!(power_of_2_below(33), 32);
|
||||||
assert_eq!(power_of_2_below(63), 32);
|
assert_eq!(power_of_2_below(63), 32);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn test_power_of_2_below_large() {
|
||||||
|
let pow: u64 = 1 << 24;
|
||||||
|
for x in (pow - 20)..(pow + 20) {
|
||||||
|
assert!(power_of_2_below(x) <= x, "{}", x);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
1
eth2/fork_choice/src/protolambda_lmd_ghost.rs
Normal file
1
eth2/fork_choice/src/protolambda_lmd_ghost.rs
Normal file
@ -0,0 +1 @@
|
|||||||
|
|
@ -64,7 +64,7 @@ where
|
|||||||
|
|
||||||
let active_validator_indices = get_active_validator_indices(
|
let active_validator_indices = get_active_validator_indices(
|
||||||
¤t_state.validator_registry[..],
|
¤t_state.validator_registry[..],
|
||||||
block_slot.epoch(spec.epoch_length),
|
block_slot.epoch(spec.slots_per_epoch),
|
||||||
);
|
);
|
||||||
|
|
||||||
for index in active_validator_indices {
|
for index in active_validator_indices {
|
||||||
|
@ -35,3 +35,31 @@ test_cases:
|
|||||||
- b3: 3
|
- b3: 3
|
||||||
heads:
|
heads:
|
||||||
- id: 'b2'
|
- id: 'b2'
|
||||||
|
- blocks:
|
||||||
|
- id: 'b0'
|
||||||
|
parent: 'b0'
|
||||||
|
- id: 'b1'
|
||||||
|
parent: 'b0'
|
||||||
|
- id: 'b2'
|
||||||
|
parent: 'b0'
|
||||||
|
- id: 'b3'
|
||||||
|
parent: 'b1'
|
||||||
|
- id: 'b4'
|
||||||
|
parent: 'b1'
|
||||||
|
- id: 'b5'
|
||||||
|
parent: 'b1'
|
||||||
|
- id: 'b6'
|
||||||
|
parent: 'b2'
|
||||||
|
- id: 'b7'
|
||||||
|
parent: 'b6'
|
||||||
|
weights:
|
||||||
|
- b0: 0
|
||||||
|
- b1: 3
|
||||||
|
- b2: 2
|
||||||
|
- b3: 1
|
||||||
|
- b4: 1
|
||||||
|
- b5: 1
|
||||||
|
- b6: 2
|
||||||
|
- b7: 2
|
||||||
|
heads:
|
||||||
|
- id: 'b4'
|
||||||
|
@ -81,7 +81,8 @@ fn test_yaml_vectors(
|
|||||||
attester_slashings: vec![],
|
attester_slashings: vec![],
|
||||||
attestations: vec![],
|
attestations: vec![],
|
||||||
deposits: vec![],
|
deposits: vec![],
|
||||||
exits: vec![],
|
voluntary_exits: vec![],
|
||||||
|
transfers: vec![],
|
||||||
};
|
};
|
||||||
|
|
||||||
// process the tests
|
// process the tests
|
||||||
@ -249,9 +250,9 @@ fn setup_inital_state(
|
|||||||
withdrawal_credentials: zero_hash,
|
withdrawal_credentials: zero_hash,
|
||||||
activation_epoch: Epoch::from(0u64),
|
activation_epoch: Epoch::from(0u64),
|
||||||
exit_epoch: spec.far_future_epoch,
|
exit_epoch: spec.far_future_epoch,
|
||||||
withdrawal_epoch: spec.far_future_epoch,
|
withdrawable_epoch: spec.far_future_epoch,
|
||||||
penalized_epoch: spec.far_future_epoch,
|
initiated_exit: false,
|
||||||
status_flags: None,
|
slashed: false,
|
||||||
};
|
};
|
||||||
// activate the validators
|
// activate the validators
|
||||||
for _ in 0..no_validators {
|
for _ in 0..no_validators {
|
||||||
|
@ -4,11 +4,22 @@ version = "0.1.0"
|
|||||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "benches"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
criterion = "0.2"
|
||||||
|
env_logger = "0.6.0"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
bls = { path = "../utils/bls" }
|
||||||
hashing = { path = "../utils/hashing" }
|
hashing = { path = "../utils/hashing" }
|
||||||
int_to_bytes = { path = "../utils/int_to_bytes" }
|
int_to_bytes = { path = "../utils/int_to_bytes" }
|
||||||
integer-sqrt = "0.1"
|
integer-sqrt = "0.1"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
|
merkle_proof = { path = "../utils/merkle_proof" }
|
||||||
ssz = { path = "../utils/ssz" }
|
ssz = { path = "../utils/ssz" }
|
||||||
|
ssz_derive = { path = "../utils/ssz_derive" }
|
||||||
types = { path = "../types" }
|
types = { path = "../types" }
|
||||||
rayon = "1.0"
|
rayon = "1.0"
|
||||||
|
65
eth2/state_processing/benches/benches.rs
Normal file
65
eth2/state_processing/benches/benches.rs
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
use criterion::Criterion;
|
||||||
|
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
|
||||||
|
// use env_logger::{Builder, Env};
|
||||||
|
use state_processing::SlotProcessable;
|
||||||
|
use types::beacon_state::BeaconStateBuilder;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
fn epoch_processing(c: &mut Criterion) {
|
||||||
|
// Builder::from_env(Env::default().default_filter_or("debug")).init();
|
||||||
|
|
||||||
|
let mut builder = BeaconStateBuilder::new(16_384);
|
||||||
|
|
||||||
|
builder.build_fast().unwrap();
|
||||||
|
builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);
|
||||||
|
|
||||||
|
let mut state = builder.cloned_state();
|
||||||
|
|
||||||
|
// Build all the caches so the following state does _not_ include the cache-building time.
|
||||||
|
state
|
||||||
|
.build_epoch_cache(RelativeEpoch::Previous, &builder.spec)
|
||||||
|
.unwrap();
|
||||||
|
state
|
||||||
|
.build_epoch_cache(RelativeEpoch::Current, &builder.spec)
|
||||||
|
.unwrap();
|
||||||
|
state
|
||||||
|
.build_epoch_cache(RelativeEpoch::Next, &builder.spec)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let cached_state = state.clone();
|
||||||
|
|
||||||
|
// Drop all the caches so the following state includes the cache-building time.
|
||||||
|
state.drop_cache(RelativeEpoch::Previous);
|
||||||
|
state.drop_cache(RelativeEpoch::Current);
|
||||||
|
state.drop_cache(RelativeEpoch::Next);
|
||||||
|
|
||||||
|
let cacheless_state = state;
|
||||||
|
|
||||||
|
let spec_a = builder.spec.clone();
|
||||||
|
let spec_b = builder.spec.clone();
|
||||||
|
|
||||||
|
c.bench(
|
||||||
|
"epoch processing",
|
||||||
|
Benchmark::new("with pre-built caches", move |b| {
|
||||||
|
b.iter_with_setup(
|
||||||
|
|| cached_state.clone(),
|
||||||
|
|mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_a).unwrap()),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.sample_size(10),
|
||||||
|
);
|
||||||
|
|
||||||
|
c.bench(
|
||||||
|
"epoch processing",
|
||||||
|
Benchmark::new("without pre-built caches", move |b| {
|
||||||
|
b.iter_with_setup(
|
||||||
|
|| cacheless_state.clone(),
|
||||||
|
|mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_b).unwrap()),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.sample_size(10),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(benches, epoch_processing,);
|
||||||
|
criterion_main!(benches);
|
@ -1,424 +0,0 @@
|
|||||||
use crate::SlotProcessingError;
|
|
||||||
use hashing::hash;
|
|
||||||
use int_to_bytes::int_to_bytes32;
|
|
||||||
use log::{debug, trace};
|
|
||||||
use ssz::{ssz_encode, TreeHash};
|
|
||||||
use types::{
|
|
||||||
beacon_state::{AttestationParticipantsError, BeaconStateError},
|
|
||||||
AggregatePublicKey, Attestation, BeaconBlock, BeaconState, ChainSpec, Crosslink, Epoch, Exit,
|
|
||||||
Fork, Hash256, PendingAttestation, PublicKey, Signature,
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: define elsehwere.
|
|
||||||
const DOMAIN_PROPOSAL: u64 = 2;
|
|
||||||
const DOMAIN_EXIT: u64 = 3;
|
|
||||||
const DOMAIN_RANDAO: u64 = 4;
|
|
||||||
const PHASE_0_CUSTODY_BIT: bool = false;
|
|
||||||
const DOMAIN_ATTESTATION: u64 = 1;
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum Error {
|
|
||||||
DBError(String),
|
|
||||||
StateAlreadyTransitioned,
|
|
||||||
PresentSlotIsNone,
|
|
||||||
UnableToDecodeBlock,
|
|
||||||
MissingParentState(Hash256),
|
|
||||||
InvalidParentState(Hash256),
|
|
||||||
MissingBeaconBlock(Hash256),
|
|
||||||
InvalidBeaconBlock(Hash256),
|
|
||||||
MissingParentBlock(Hash256),
|
|
||||||
NoBlockProducer,
|
|
||||||
StateSlotMismatch,
|
|
||||||
BadBlockSignature,
|
|
||||||
BadRandaoSignature,
|
|
||||||
MaxProposerSlashingsExceeded,
|
|
||||||
BadProposerSlashing,
|
|
||||||
MaxAttestationsExceeded,
|
|
||||||
InvalidAttestation(AttestationValidationError),
|
|
||||||
NoBlockRoot,
|
|
||||||
MaxDepositsExceeded,
|
|
||||||
MaxExitsExceeded,
|
|
||||||
BadExit,
|
|
||||||
BadCustodyReseeds,
|
|
||||||
BadCustodyChallenges,
|
|
||||||
BadCustodyResponses,
|
|
||||||
BeaconStateError(BeaconStateError),
|
|
||||||
SlotProcessingError(SlotProcessingError),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum AttestationValidationError {
|
|
||||||
IncludedTooEarly,
|
|
||||||
IncludedTooLate,
|
|
||||||
WrongJustifiedSlot,
|
|
||||||
WrongJustifiedRoot,
|
|
||||||
BadLatestCrosslinkRoot,
|
|
||||||
BadSignature,
|
|
||||||
ShardBlockRootNotZero,
|
|
||||||
NoBlockRoot,
|
|
||||||
AttestationParticipantsError(AttestationParticipantsError),
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! ensure {
|
|
||||||
($condition: expr, $result: expr) => {
|
|
||||||
if !$condition {
|
|
||||||
return Err($result);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait BlockProcessable {
|
|
||||||
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>;
|
|
||||||
fn per_block_processing_without_verifying_block_signature(
|
|
||||||
&mut self,
|
|
||||||
block: &BeaconBlock,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BlockProcessable for BeaconState {
|
|
||||||
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> {
|
|
||||||
per_block_processing_signature_optional(self, block, true, spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn per_block_processing_without_verifying_block_signature(
|
|
||||||
&mut self,
|
|
||||||
block: &BeaconBlock,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
per_block_processing_signature_optional(self, block, false, spec)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn per_block_processing_signature_optional(
|
|
||||||
state: &mut BeaconState,
|
|
||||||
block: &BeaconBlock,
|
|
||||||
verify_block_signature: bool,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
ensure!(block.slot == state.slot, Error::StateSlotMismatch);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Proposer Signature
|
|
||||||
*/
|
|
||||||
let block_proposer_index = state
|
|
||||||
.get_beacon_proposer_index(block.slot, spec)
|
|
||||||
.map_err(|_| Error::NoBlockProducer)?;
|
|
||||||
let block_proposer = &state.validator_registry[block_proposer_index];
|
|
||||||
|
|
||||||
if verify_block_signature {
|
|
||||||
ensure!(
|
|
||||||
bls_verify(
|
|
||||||
&block_proposer.pubkey,
|
|
||||||
&block.proposal_root(spec)[..],
|
|
||||||
&block.signature,
|
|
||||||
get_domain(&state.fork, state.current_epoch(spec), DOMAIN_PROPOSAL)
|
|
||||||
),
|
|
||||||
Error::BadBlockSignature
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* RANDAO
|
|
||||||
*/
|
|
||||||
ensure!(
|
|
||||||
bls_verify(
|
|
||||||
&block_proposer.pubkey,
|
|
||||||
&int_to_bytes32(state.current_epoch(spec).as_u64()),
|
|
||||||
&block.randao_reveal,
|
|
||||||
get_domain(&state.fork, state.current_epoch(spec), DOMAIN_RANDAO)
|
|
||||||
),
|
|
||||||
Error::BadRandaoSignature
|
|
||||||
);
|
|
||||||
|
|
||||||
// TODO: check this is correct.
|
|
||||||
let new_mix = {
|
|
||||||
let mut mix = state.latest_randao_mixes
|
|
||||||
[state.slot.as_usize() % spec.latest_randao_mixes_length]
|
|
||||||
.to_vec();
|
|
||||||
mix.append(&mut ssz_encode(&block.randao_reveal));
|
|
||||||
Hash256::from(&hash(&mix)[..])
|
|
||||||
};
|
|
||||||
|
|
||||||
state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Eth1 data
|
|
||||||
*/
|
|
||||||
// TODO: Eth1 data processing.
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Proposer slashings
|
|
||||||
*/
|
|
||||||
ensure!(
|
|
||||||
block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
|
|
||||||
Error::MaxProposerSlashingsExceeded
|
|
||||||
);
|
|
||||||
for proposer_slashing in &block.body.proposer_slashings {
|
|
||||||
let proposer = state
|
|
||||||
.validator_registry
|
|
||||||
.get(proposer_slashing.proposer_index as usize)
|
|
||||||
.ok_or(Error::BadProposerSlashing)?;
|
|
||||||
ensure!(
|
|
||||||
proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot,
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard,
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
proposer_slashing.proposal_data_1.block_root
|
|
||||||
!= proposer_slashing.proposal_data_2.block_root,
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
proposer.penalized_epoch > state.current_epoch(spec),
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
bls_verify(
|
|
||||||
&proposer.pubkey,
|
|
||||||
&proposer_slashing.proposal_data_1.hash_tree_root(),
|
|
||||||
&proposer_slashing.proposal_signature_1,
|
|
||||||
get_domain(
|
|
||||||
&state.fork,
|
|
||||||
proposer_slashing
|
|
||||||
.proposal_data_1
|
|
||||||
.slot
|
|
||||||
.epoch(spec.epoch_length),
|
|
||||||
DOMAIN_PROPOSAL
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
bls_verify(
|
|
||||||
&proposer.pubkey,
|
|
||||||
&proposer_slashing.proposal_data_2.hash_tree_root(),
|
|
||||||
&proposer_slashing.proposal_signature_2,
|
|
||||||
get_domain(
|
|
||||||
&state.fork,
|
|
||||||
proposer_slashing
|
|
||||||
.proposal_data_2
|
|
||||||
.slot
|
|
||||||
.epoch(spec.epoch_length),
|
|
||||||
DOMAIN_PROPOSAL
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Error::BadProposerSlashing
|
|
||||||
);
|
|
||||||
state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Attestations
|
|
||||||
*/
|
|
||||||
ensure!(
|
|
||||||
block.body.attestations.len() as u64 <= spec.max_attestations,
|
|
||||||
Error::MaxAttestationsExceeded
|
|
||||||
);
|
|
||||||
|
|
||||||
debug!("Verifying {} attestations.", block.body.attestations.len());
|
|
||||||
|
|
||||||
for attestation in &block.body.attestations {
|
|
||||||
validate_attestation(&state, attestation, spec)?;
|
|
||||||
|
|
||||||
let pending_attestation = PendingAttestation {
|
|
||||||
data: attestation.data.clone(),
|
|
||||||
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
|
|
||||||
custody_bitfield: attestation.custody_bitfield.clone(),
|
|
||||||
inclusion_slot: state.slot,
|
|
||||||
};
|
|
||||||
state.latest_attestations.push(pending_attestation);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Deposits
|
|
||||||
*/
|
|
||||||
ensure!(
|
|
||||||
block.body.deposits.len() as u64 <= spec.max_deposits,
|
|
||||||
Error::MaxDepositsExceeded
|
|
||||||
);
|
|
||||||
|
|
||||||
// TODO: process deposits.
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Exits
|
|
||||||
*/
|
|
||||||
ensure!(
|
|
||||||
block.body.exits.len() as u64 <= spec.max_exits,
|
|
||||||
Error::MaxExitsExceeded
|
|
||||||
);
|
|
||||||
|
|
||||||
for exit in &block.body.exits {
|
|
||||||
let validator = state
|
|
||||||
.validator_registry
|
|
||||||
.get(exit.validator_index as usize)
|
|
||||||
.ok_or(Error::BadExit)?;
|
|
||||||
ensure!(
|
|
||||||
validator.exit_epoch
|
|
||||||
> state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec),
|
|
||||||
Error::BadExit
|
|
||||||
);
|
|
||||||
ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit);
|
|
||||||
let exit_message = {
|
|
||||||
let exit_struct = Exit {
|
|
||||||
epoch: exit.epoch,
|
|
||||||
validator_index: exit.validator_index,
|
|
||||||
signature: spec.empty_signature.clone(),
|
|
||||||
};
|
|
||||||
exit_struct.hash_tree_root()
|
|
||||||
};
|
|
||||||
ensure!(
|
|
||||||
bls_verify(
|
|
||||||
                &validator.pubkey,
                &exit_message,
                &exit.signature,
                get_domain(&state.fork, exit.epoch, DOMAIN_EXIT)
            ),
            Error::BadProposerSlashing
        );
        state.initiate_validator_exit(exit.validator_index as usize);
    }

    debug!("State transition complete.");

    Ok(())
}

pub fn validate_attestation(
    state: &BeaconState,
    attestation: &Attestation,
    spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
    validate_attestation_signature_optional(state, attestation, spec, true)
}

pub fn validate_attestation_without_signature(
    state: &BeaconState,
    attestation: &Attestation,
    spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
    validate_attestation_signature_optional(state, attestation, spec, false)
}

fn validate_attestation_signature_optional(
    state: &BeaconState,
    attestation: &Attestation,
    spec: &ChainSpec,
    verify_signature: bool,
) -> Result<(), AttestationValidationError> {
    trace!(
        "validate_attestation_signature_optional: attestation epoch: {}",
        attestation.data.slot.epoch(spec.epoch_length)
    );
    ensure!(
        attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
        AttestationValidationError::IncludedTooEarly
    );
    ensure!(
        attestation.data.slot + spec.epoch_length >= state.slot,
        AttestationValidationError::IncludedTooLate
    );
    if attestation.data.slot >= state.current_epoch_start_slot(spec) {
        ensure!(
            attestation.data.justified_epoch == state.justified_epoch,
            AttestationValidationError::WrongJustifiedSlot
        );
    } else {
        ensure!(
            attestation.data.justified_epoch == state.previous_justified_epoch,
            AttestationValidationError::WrongJustifiedSlot
        );
    }
    ensure!(
        attestation.data.justified_block_root
            == *state
                .get_block_root(
                    attestation
                        .data
                        .justified_epoch
                        .start_slot(spec.epoch_length),
                    &spec
                )
                .ok_or(AttestationValidationError::NoBlockRoot)?,
        AttestationValidationError::WrongJustifiedRoot
    );
    let potential_crosslink = Crosslink {
        shard_block_root: attestation.data.shard_block_root,
        epoch: attestation.data.slot.epoch(spec.epoch_length),
    };
    ensure!(
        (attestation.data.latest_crosslink
            == state.latest_crosslinks[attestation.data.shard as usize])
            | (attestation.data.latest_crosslink == potential_crosslink),
        AttestationValidationError::BadLatestCrosslinkRoot
    );
    if verify_signature {
        let participants = state.get_attestation_participants(
            &attestation.data,
            &attestation.aggregation_bitfield,
            spec,
        )?;
        let mut group_public_key = AggregatePublicKey::new();
        for participant in participants {
            group_public_key.add(
                state.validator_registry[participant as usize]
                    .pubkey
                    .as_raw(),
            )
        }
        ensure!(
            attestation.verify_signature(
                &group_public_key,
                PHASE_0_CUSTODY_BIT,
                get_domain(
                    &state.fork,
                    attestation.data.slot.epoch(spec.epoch_length),
                    DOMAIN_ATTESTATION,
                )
            ),
            AttestationValidationError::BadSignature
        );
    }
    ensure!(
        attestation.data.shard_block_root == spec.zero_hash,
        AttestationValidationError::ShardBlockRootNotZero
    );
    Ok(())
}

fn get_domain(_fork: &Fork, _epoch: Epoch, _domain_type: u64) -> u64 {
    // TODO: stubbed out.
    0
}

fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool {
    // TODO: add domain
    signature.verify(message, pubkey)
}

impl From<AttestationValidationError> for Error {
    fn from(e: AttestationValidationError) -> Error {
        Error::InvalidAttestation(e)
    }
}

impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Error {
        Error::BeaconStateError(e)
    }
}

impl From<SlotProcessingError> for Error {
    fn from(e: SlotProcessingError) -> Error {
        Error::SlotProcessingError(e)
    }
}

impl From<AttestationParticipantsError> for AttestationValidationError {
    fn from(e: AttestationParticipantsError) -> AttestationValidationError {
        AttestationValidationError::AttestationParticipantsError(e)
    }
}
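
For intuition on the two inclusion-window checks above (`IncludedTooEarly` / `IncludedTooLate`), here is a small, self-contained sketch of the same arithmetic. It is illustrative only and not part of this diff; the concrete constants (a minimum inclusion delay of 4 slots, an epoch length of 64 slots) are assumed example values, not values read from this commit.

/// Illustrative sketch: an attestation made at `attestation_slot` may be included at
/// `state_slot` only if it is at least MIN_ATTESTATION_INCLUSION_DELAY slots old and
/// no more than EPOCH_LENGTH slots old, mirroring the two `ensure!` checks above.
fn inclusion_window_ok(attestation_slot: u64, state_slot: u64) -> bool {
    const MIN_ATTESTATION_INCLUSION_DELAY: u64 = 4; // assumed example value
    const EPOCH_LENGTH: u64 = 64; // assumed example value
    attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state_slot
        && attestation_slot + EPOCH_LENGTH >= state_slot
}

#[test]
fn inclusion_window_example() {
    assert!(inclusion_window_ok(100, 104)); // earliest allowed inclusion slot
    assert!(!inclusion_window_ok(100, 103)); // included too early
    assert!(!inclusion_window_ok(100, 165)); // included too late
}
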
@ -1,729 +0,0 @@
use integer_sqrt::IntegerSquareRoot;
|
|
||||||
use log::{debug, trace};
|
|
||||||
use rayon::prelude::*;
|
|
||||||
use ssz::TreeHash;
|
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
use std::iter::FromIterator;
|
|
||||||
use types::{
|
|
||||||
beacon_state::{AttestationParticipantsError, BeaconStateError, InclusionError},
|
|
||||||
validator_registry::get_active_validator_indices,
|
|
||||||
BeaconState, ChainSpec, Crosslink, Epoch, Hash256, PendingAttestation,
|
|
||||||
};
|
|
||||||
|
|
||||||
macro_rules! safe_add_assign {
|
|
||||||
($a: expr, $b: expr) => {
|
|
||||||
$a = $a.saturating_add($b);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
macro_rules! safe_sub_assign {
|
|
||||||
($a: expr, $b: expr) => {
|
|
||||||
$a = $a.saturating_sub($b);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum Error {
|
|
||||||
UnableToDetermineProducer,
|
|
||||||
NoBlockRoots,
|
|
||||||
BaseRewardQuotientIsZero,
|
|
||||||
NoRandaoSeed,
|
|
||||||
BeaconStateError(BeaconStateError),
|
|
||||||
AttestationParticipantsError(AttestationParticipantsError),
|
|
||||||
InclusionError(InclusionError),
|
|
||||||
WinningRootError(WinningRootError),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum WinningRootError {
|
|
||||||
NoWinningRoot,
|
|
||||||
AttestationParticipantsError(AttestationParticipantsError),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct WinningRoot {
|
|
||||||
pub shard_block_root: Hash256,
|
|
||||||
pub attesting_validator_indices: Vec<usize>,
|
|
||||||
pub total_balance: u64,
|
|
||||||
pub total_attesting_balance: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait EpochProcessable {
|
|
||||||
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EpochProcessable for BeaconState {
|
|
||||||
// Cyclomatic complexity is ignored. It would be ideal to split this function apart, however it
|
|
||||||
// remains monolithic to allow for easier spec updates. Once the spec is more stable we can
|
|
||||||
// optimise.
|
|
||||||
#[allow(clippy::cyclomatic_complexity)]
|
|
||||||
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> {
|
|
||||||
let current_epoch = self.current_epoch(spec);
|
|
||||||
let previous_epoch = self.previous_epoch(spec);
|
|
||||||
let next_epoch = self.next_epoch(spec);
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"Starting per-epoch processing on epoch {}...",
|
|
||||||
self.current_epoch(spec)
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators attesting during the current epoch.
|
|
||||||
*/
|
|
||||||
let active_validator_indices = get_active_validator_indices(
|
|
||||||
&self.validator_registry,
|
|
||||||
self.slot.epoch(spec.epoch_length),
|
|
||||||
);
|
|
||||||
let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec);
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"{} validators with a total balance of {} wei.",
|
|
||||||
active_validator_indices.len(),
|
|
||||||
current_total_balance
|
|
||||||
);
|
|
||||||
|
|
||||||
let current_epoch_attestations: Vec<&PendingAttestation> = self
|
|
||||||
.latest_attestations
|
|
||||||
.par_iter()
|
|
||||||
.filter(|a| {
|
|
||||||
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
|
|
||||||
== self.current_epoch(spec)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"Current epoch attestations: {}",
|
|
||||||
current_epoch_attestations.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
let current_epoch_boundary_attestations: Vec<&PendingAttestation> =
|
|
||||||
current_epoch_attestations
|
|
||||||
.par_iter()
|
|
||||||
.filter(
|
|
||||||
|a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) {
|
|
||||||
Some(block_root) => {
|
|
||||||
(a.data.epoch_boundary_root == *block_root)
|
|
||||||
&& (a.data.justified_epoch == self.justified_epoch)
|
|
||||||
}
|
|
||||||
None => unreachable!(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let current_epoch_boundary_attester_indices = self
|
|
||||||
.get_attestation_participants_union(¤t_epoch_boundary_attestations[..], spec)?;
|
|
||||||
let current_epoch_boundary_attesting_balance =
|
|
||||||
self.get_total_balance(¤t_epoch_boundary_attester_indices[..], spec);
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"Current epoch boundary attesters: {}",
|
|
||||||
current_epoch_boundary_attester_indices.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators attesting during the previous epoch
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators that made an attestation during the previous epoch
|
|
||||||
*/
|
|
||||||
let previous_epoch_attestations: Vec<&PendingAttestation> = self
|
|
||||||
.latest_attestations
|
|
||||||
.par_iter()
|
|
||||||
.filter(|a| {
|
|
||||||
//TODO: ensure these saturating subs are correct.
|
|
||||||
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
|
|
||||||
== self.previous_epoch(spec)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"previous epoch attestations: {}",
|
|
||||||
previous_epoch_attestations.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
let previous_epoch_attester_indices =
|
|
||||||
self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?;
|
|
||||||
let previous_total_balance = self.get_total_balance(
|
|
||||||
&get_active_validator_indices(&self.validator_registry, previous_epoch),
|
|
||||||
spec,
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators targetting the previous justified slot
|
|
||||||
*/
|
|
||||||
let previous_epoch_justified_attestations: Vec<&PendingAttestation> = {
|
|
||||||
let mut a: Vec<&PendingAttestation> = current_epoch_attestations
|
|
||||||
.iter()
|
|
||||||
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
let mut b: Vec<&PendingAttestation> = previous_epoch_attestations
|
|
||||||
.iter()
|
|
||||||
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
a.append(&mut b);
|
|
||||||
a
|
|
||||||
};
|
|
||||||
|
|
||||||
let previous_epoch_justified_attester_indices = self
|
|
||||||
.get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?;
|
|
||||||
let previous_epoch_justified_attesting_balance =
|
|
||||||
self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators justifying the epoch boundary block at the start of the previous epoch
|
|
||||||
*/
|
|
||||||
let previous_epoch_boundary_attestations: Vec<&PendingAttestation> =
|
|
||||||
previous_epoch_justified_attestations
|
|
||||||
.iter()
|
|
||||||
.filter(
|
|
||||||
|a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) {
|
|
||||||
Some(block_root) => a.data.epoch_boundary_root == *block_root,
|
|
||||||
None => unreachable!(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let previous_epoch_boundary_attester_indices = self
|
|
||||||
.get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?;
|
|
||||||
let previous_epoch_boundary_attesting_balance =
|
|
||||||
self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validators attesting to the expected beacon chain head during the previous epoch.
|
|
||||||
*/
|
|
||||||
let previous_epoch_head_attestations: Vec<&PendingAttestation> =
|
|
||||||
previous_epoch_attestations
|
|
||||||
.iter()
|
|
||||||
.filter(|a| match self.get_block_root(a.data.slot, spec) {
|
|
||||||
Some(block_root) => a.data.beacon_block_root == *block_root,
|
|
||||||
None => unreachable!(),
|
|
||||||
})
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let previous_epoch_head_attester_indices =
|
|
||||||
self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?;
|
|
||||||
let previous_epoch_head_attesting_balance =
|
|
||||||
self.get_total_balance(&previous_epoch_head_attester_indices[..], spec);
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"previous_epoch_head_attester_balance of {} wei.",
|
|
||||||
previous_epoch_head_attesting_balance
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Eth1 Data
|
|
||||||
*/
|
|
||||||
if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 {
|
|
||||||
for eth1_data_vote in &self.eth1_data_votes {
|
|
||||||
if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period {
|
|
||||||
self.latest_eth1_data = eth1_data_vote.eth1_data.clone();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
self.eth1_data_votes = vec![];
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Justification
|
|
||||||
*/
|
|
||||||
|
|
||||||
let mut new_justified_epoch = self.justified_epoch;
|
|
||||||
self.justification_bitfield <<= 1;
|
|
||||||
|
|
||||||
// If > 2/3 of the total balance attested to the previous epoch boundary
|
|
||||||
//
|
|
||||||
// - Set the 2nd bit of the bitfield.
|
|
||||||
// - Set the previous epoch to be justified.
|
|
||||||
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
|
|
||||||
self.justification_bitfield |= 2;
|
|
||||||
new_justified_epoch = previous_epoch;
|
|
||||||
trace!(">= 2/3 voted for previous epoch boundary");
|
|
||||||
}
|
|
||||||
// If > 2/3 of the total balance attested to the previous epoch boundary
|
|
||||||
//
|
|
||||||
// - Set the 1st bit of the bitfield.
|
|
||||||
// - Set the current epoch to be justified.
|
|
||||||
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
|
|
||||||
self.justification_bitfield |= 1;
|
|
||||||
new_justified_epoch = current_epoch;
|
|
||||||
trace!(">= 2/3 voted for current epoch boundary");
|
|
||||||
}
|
|
||||||
|
|
||||||
// If:
|
|
||||||
//
|
|
||||||
// - All three epochs prior to this epoch have been justified.
|
|
||||||
// - The previous justified justified epoch was three epochs ago.
|
|
||||||
//
|
|
||||||
// Then, set the finalized epoch to be three epochs ago.
|
|
||||||
if ((self.justification_bitfield >> 1) % 8 == 0b111)
|
|
||||||
& (self.previous_justified_epoch == previous_epoch - 2)
|
|
||||||
{
|
|
||||||
self.finalized_epoch = self.previous_justified_epoch;
|
|
||||||
trace!("epoch - 3 was finalized (1st condition).");
|
|
||||||
}
|
|
||||||
// If:
|
|
||||||
//
|
|
||||||
// - Both two epochs prior to this epoch have been justified.
|
|
||||||
// - The previous justified epoch was two epochs ago.
|
|
||||||
//
|
|
||||||
// Then, set the finalized epoch to two epochs ago.
|
|
||||||
if ((self.justification_bitfield >> 1) % 4 == 0b11)
|
|
||||||
& (self.previous_justified_epoch == previous_epoch - 1)
|
|
||||||
{
|
|
||||||
self.finalized_epoch = self.previous_justified_epoch;
|
|
||||||
trace!("epoch - 2 was finalized (2nd condition).");
|
|
||||||
}
|
|
||||||
// If:
|
|
||||||
//
|
|
||||||
// - This epoch and the two prior have been justified.
|
|
||||||
// - The presently justified epoch was two epochs ago.
|
|
||||||
//
|
|
||||||
// Then, set the finalized epoch to two epochs ago.
|
|
||||||
if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1)
|
|
||||||
{
|
|
||||||
self.finalized_epoch = self.justified_epoch;
|
|
||||||
trace!("epoch - 2 was finalized (3rd condition).");
|
|
||||||
}
|
|
||||||
// If:
|
|
||||||
//
|
|
||||||
// - This epoch and the epoch prior to it have been justified.
|
|
||||||
// - Set the previous epoch to be justified.
|
|
||||||
//
|
|
||||||
// Then, set the finalized epoch to be the previous epoch.
|
|
||||||
if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) {
|
|
||||||
self.finalized_epoch = self.justified_epoch;
|
|
||||||
trace!("epoch - 1 was finalized (4th condition).");
|
|
||||||
}
|
|
||||||
|
|
||||||
self.previous_justified_epoch = self.justified_epoch;
|
|
||||||
self.justified_epoch = new_justified_epoch;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"Finalized epoch {}, justified epoch {}.",
|
|
||||||
self.finalized_epoch, self.justified_epoch
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Crosslinks
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Cached for later lookups.
|
|
||||||
let mut winning_root_for_shards: HashMap<u64, Result<WinningRoot, WinningRootError>> =
|
|
||||||
HashMap::new();
|
|
||||||
|
|
||||||
// for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
|
|
||||||
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
|
|
||||||
trace!(
|
|
||||||
"Finding winning root for slot: {} (epoch: {})",
|
|
||||||
slot,
|
|
||||||
slot.epoch(spec.epoch_length)
|
|
||||||
);
|
|
||||||
let crosslink_committees_at_slot =
|
|
||||||
self.get_crosslink_committees_at_slot(slot, false, spec)?;
|
|
||||||
|
|
||||||
for (crosslink_committee, shard) in crosslink_committees_at_slot {
|
|
||||||
let shard = shard as u64;
|
|
||||||
|
|
||||||
let winning_root = winning_root(
|
|
||||||
self,
|
|
||||||
shard,
|
|
||||||
¤t_epoch_attestations,
|
|
||||||
&previous_epoch_attestations,
|
|
||||||
spec,
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Ok(winning_root) = &winning_root {
|
|
||||||
let total_committee_balance =
|
|
||||||
self.get_total_balance(&crosslink_committee[..], spec);
|
|
||||||
|
|
||||||
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
|
|
||||||
self.latest_crosslinks[shard as usize] = Crosslink {
|
|
||||||
epoch: current_epoch,
|
|
||||||
shard_block_root: winning_root.shard_block_root,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
winning_root_for_shards.insert(shard, winning_root);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"Found {} winning shard roots.",
|
|
||||||
winning_root_for_shards.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Rewards and Penalities
|
|
||||||
*/
|
|
||||||
let base_reward_quotient =
|
|
||||||
previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
|
|
||||||
if base_reward_quotient == 0 {
|
|
||||||
return Err(Error::BaseRewardQuotientIsZero);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Justification and finalization
|
|
||||||
*/
|
|
||||||
let epochs_since_finality = next_epoch - self.finalized_epoch;
|
|
||||||
|
|
||||||
let previous_epoch_justified_attester_indices_hashset: HashSet<usize> =
|
|
||||||
HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned());
|
|
||||||
let previous_epoch_boundary_attester_indices_hashset: HashSet<usize> =
|
|
||||||
HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned());
|
|
||||||
let previous_epoch_head_attester_indices_hashset: HashSet<usize> =
|
|
||||||
HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned());
|
|
||||||
let previous_epoch_attester_indices_hashset: HashSet<usize> =
|
|
||||||
HashSet::from_iter(previous_epoch_attester_indices.iter().cloned());
|
|
||||||
let active_validator_indices_hashset: HashSet<usize> =
|
|
||||||
HashSet::from_iter(active_validator_indices.iter().cloned());
|
|
||||||
|
|
||||||
debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len());
|
|
||||||
|
|
||||||
debug!("{} epochs since finality.", epochs_since_finality);
|
|
||||||
|
|
||||||
if epochs_since_finality <= 4 {
|
|
||||||
for index in 0..self.validator_balances.len() {
|
|
||||||
let base_reward = self.base_reward(index, base_reward_quotient, spec);
|
|
||||||
|
|
||||||
if previous_epoch_justified_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward * previous_epoch_justified_attesting_balance
|
|
||||||
/ previous_total_balance
|
|
||||||
);
|
|
||||||
} else if active_validator_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], base_reward);
|
|
||||||
}
|
|
||||||
|
|
||||||
if previous_epoch_boundary_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward * previous_epoch_boundary_attesting_balance
|
|
||||||
/ previous_total_balance
|
|
||||||
);
|
|
||||||
} else if active_validator_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], base_reward);
|
|
||||||
}
|
|
||||||
|
|
||||||
if previous_epoch_head_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward * previous_epoch_head_attesting_balance
|
|
||||||
/ previous_total_balance
|
|
||||||
);
|
|
||||||
} else if active_validator_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], base_reward);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for index in previous_epoch_attester_indices {
|
|
||||||
let base_reward = self.base_reward(index, base_reward_quotient, spec);
|
|
||||||
let inclusion_distance =
|
|
||||||
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
|
|
||||||
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
|
|
||||||
)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for index in 0..self.validator_balances.len() {
|
|
||||||
let inactivity_penalty = self.inactivity_penalty(
|
|
||||||
index,
|
|
||||||
epochs_since_finality,
|
|
||||||
base_reward_quotient,
|
|
||||||
spec,
|
|
||||||
);
|
|
||||||
if active_validator_indices_hashset.contains(&index) {
|
|
||||||
if !previous_epoch_justified_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
|
|
||||||
}
|
|
||||||
if !previous_epoch_boundary_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
|
|
||||||
}
|
|
||||||
if !previous_epoch_head_attester_indices_hashset.contains(&index) {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.validator_registry[index].penalized_epoch <= current_epoch {
|
|
||||||
let base_reward = self.base_reward(index, base_reward_quotient, spec);
|
|
||||||
safe_sub_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
2 * inactivity_penalty + base_reward
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for index in previous_epoch_attester_indices {
|
|
||||||
let base_reward = self.base_reward(index, base_reward_quotient, spec);
|
|
||||||
let inclusion_distance =
|
|
||||||
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
|
|
||||||
|
|
||||||
safe_sub_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward
|
|
||||||
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
trace!("Processed validator justification and finalization rewards/penalities.");
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Attestation inclusion
|
|
||||||
*/
|
|
||||||
for &index in &previous_epoch_attester_indices_hashset {
|
|
||||||
let inclusion_slot =
|
|
||||||
self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?;
|
|
||||||
let proposer_index = self
|
|
||||||
.get_beacon_proposer_index(inclusion_slot, spec)
|
|
||||||
.map_err(|_| Error::UnableToDetermineProducer)?;
|
|
||||||
let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec);
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[proposer_index],
|
|
||||||
base_reward / spec.includer_reward_quotient
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"Previous epoch attesters: {}.",
|
|
||||||
previous_epoch_attester_indices_hashset.len()
|
|
||||||
);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Crosslinks
|
|
||||||
*/
|
|
||||||
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
|
|
||||||
let crosslink_committees_at_slot =
|
|
||||||
self.get_crosslink_committees_at_slot(slot, false, spec)?;
|
|
||||||
|
|
||||||
for (_crosslink_committee, shard) in crosslink_committees_at_slot {
|
|
||||||
let shard = shard as u64;
|
|
||||||
|
|
||||||
if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) {
|
|
||||||
// TODO: remove the map.
|
|
||||||
let attesting_validator_indices: HashSet<usize> = HashSet::from_iter(
|
|
||||||
winning_root.attesting_validator_indices.iter().cloned(),
|
|
||||||
);
|
|
||||||
|
|
||||||
for index in 0..self.validator_balances.len() {
|
|
||||||
let base_reward = self.base_reward(index, base_reward_quotient, spec);
|
|
||||||
|
|
||||||
if attesting_validator_indices.contains(&index) {
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[index],
|
|
||||||
base_reward * winning_root.total_attesting_balance
|
|
||||||
/ winning_root.total_balance
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
safe_sub_assign!(self.validator_balances[index], base_reward);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for index in &winning_root.attesting_validator_indices {
|
|
||||||
let base_reward = self.base_reward(*index, base_reward_quotient, spec);
|
|
||||||
safe_add_assign!(
|
|
||||||
self.validator_balances[*index],
|
|
||||||
base_reward * winning_root.total_attesting_balance
|
|
||||||
/ winning_root.total_balance
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ejections
|
|
||||||
*/
|
|
||||||
self.process_ejections(spec);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Validator Registry
|
|
||||||
*/
|
|
||||||
self.previous_calculation_epoch = self.current_calculation_epoch;
|
|
||||||
self.previous_epoch_start_shard = self.current_epoch_start_shard;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"setting previous_epoch_seed to : {}",
|
|
||||||
self.current_epoch_seed
|
|
||||||
);
|
|
||||||
|
|
||||||
self.previous_epoch_seed = self.current_epoch_seed;
|
|
||||||
|
|
||||||
let should_update_validator_registy = if self.finalized_epoch
|
|
||||||
> self.validator_registry_update_epoch
|
|
||||||
{
|
|
||||||
(0..self.get_current_epoch_committee_count(spec)).all(|i| {
|
|
||||||
let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count;
|
|
||||||
self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
};
|
|
||||||
|
|
||||||
if should_update_validator_registy {
|
|
||||||
trace!("updating validator registry.");
|
|
||||||
self.update_validator_registry(spec);
|
|
||||||
|
|
||||||
self.current_calculation_epoch = next_epoch;
|
|
||||||
self.current_epoch_start_shard = (self.current_epoch_start_shard
|
|
||||||
+ self.get_current_epoch_committee_count(spec) as u64)
|
|
||||||
% spec.shard_count;
|
|
||||||
self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)?
|
|
||||||
} else {
|
|
||||||
trace!("not updating validator registry.");
|
|
||||||
let epochs_since_last_registry_update =
|
|
||||||
current_epoch - self.validator_registry_update_epoch;
|
|
||||||
if (epochs_since_last_registry_update > 1)
|
|
||||||
& epochs_since_last_registry_update.is_power_of_two()
|
|
||||||
{
|
|
||||||
self.current_calculation_epoch = next_epoch;
|
|
||||||
self.current_epoch_seed =
|
|
||||||
self.generate_seed(self.current_calculation_epoch, spec)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.process_penalties_and_exits(spec);
|
|
||||||
|
|
||||||
self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize)
|
|
||||||
% spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices(
|
|
||||||
&self.validator_registry,
|
|
||||||
next_epoch + Epoch::from(spec.entry_exit_delay),
|
|
||||||
));
|
|
||||||
self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] =
|
|
||||||
self.latest_penalized_balances
|
|
||||||
[current_epoch.as_usize() % spec.latest_penalized_exit_length];
|
|
||||||
self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self
|
|
||||||
.get_randao_mix(current_epoch, spec)
|
|
||||||
.and_then(|x| Some(*x))
|
|
||||||
.ok_or_else(|| Error::NoRandaoSeed)?;
|
|
||||||
self.latest_attestations = self
|
|
||||||
.latest_attestations
|
|
||||||
.iter()
|
|
||||||
.filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
debug!("Epoch transition complete.");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
|
|
||||||
Hash256::from(&input.hash_tree_root()[..])
|
|
||||||
}
|
|
||||||
|
|
||||||
fn winning_root(
|
|
||||||
state: &BeaconState,
|
|
||||||
shard: u64,
|
|
||||||
current_epoch_attestations: &[&PendingAttestation],
|
|
||||||
previous_epoch_attestations: &[&PendingAttestation],
|
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<WinningRoot, WinningRootError> {
|
|
||||||
let mut attestations = current_epoch_attestations.to_vec();
|
|
||||||
attestations.append(&mut previous_epoch_attestations.to_vec());
|
|
||||||
|
|
||||||
let mut candidates: HashMap<Hash256, WinningRoot> = HashMap::new();
|
|
||||||
|
|
||||||
let mut highest_seen_balance = 0;
|
|
||||||
|
|
||||||
for a in &attestations {
|
|
||||||
if a.data.shard != shard {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let shard_block_root = &a.data.shard_block_root;
|
|
||||||
|
|
||||||
if candidates.contains_key(shard_block_root) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: `cargo fmt` makes this rather ugly; tidy up.
|
|
||||||
let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result<
|
|
||||||
_,
|
|
||||||
AttestationParticipantsError,
|
|
||||||
>>(vec![], |mut acc, a| {
|
|
||||||
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
|
|
||||||
acc.append(&mut state.get_attestation_participants(
|
|
||||||
&a.data,
|
|
||||||
&a.aggregation_bitfield,
|
|
||||||
spec,
|
|
||||||
)?);
|
|
||||||
}
|
|
||||||
Ok(acc)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let total_balance: u64 = attesting_validator_indices
|
|
||||||
.iter()
|
|
||||||
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
|
|
||||||
|
|
||||||
let total_attesting_balance: u64 = attesting_validator_indices
|
|
||||||
.iter()
|
|
||||||
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
|
|
||||||
|
|
||||||
if total_attesting_balance > highest_seen_balance {
|
|
||||||
highest_seen_balance = total_attesting_balance;
|
|
||||||
}
|
|
||||||
|
|
||||||
let candidate_root = WinningRoot {
|
|
||||||
shard_block_root: *shard_block_root,
|
|
||||||
attesting_validator_indices,
|
|
||||||
total_attesting_balance,
|
|
||||||
total_balance,
|
|
||||||
};
|
|
||||||
|
|
||||||
candidates.insert(*shard_block_root, candidate_root);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(candidates
|
|
||||||
.iter()
|
|
||||||
.filter_map(|(_hash, candidate)| {
|
|
||||||
if candidate.total_attesting_balance == highest_seen_balance {
|
|
||||||
Some(candidate)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.min_by_key(|candidate| candidate.shard_block_root)
|
|
||||||
.ok_or_else(|| WinningRootError::NoWinningRoot)?
|
|
||||||
// TODO: avoid clone.
|
|
||||||
.clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<InclusionError> for Error {
|
|
||||||
fn from(e: InclusionError) -> Error {
|
|
||||||
Error::InclusionError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<BeaconStateError> for Error {
|
|
||||||
fn from(e: BeaconStateError) -> Error {
|
|
||||||
Error::BeaconStateError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<AttestationParticipantsError> for Error {
|
|
||||||
fn from(e: AttestationParticipantsError) -> Error {
|
|
||||||
Error::AttestationParticipantsError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<AttestationParticipantsError> for WinningRootError {
|
|
||||||
fn from(e: AttestationParticipantsError) -> WinningRootError {
|
|
||||||
WinningRootError::AttestationParticipantsError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
#[test]
|
|
||||||
fn it_works() {
|
|
||||||
assert_eq!(2 + 2, 4);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,10 +1,13 @@
-mod block_processable;
-mod epoch_processable;
-mod slot_processable;
-
-pub use block_processable::{
-    validate_attestation, validate_attestation_without_signature, BlockProcessable,
-    Error as BlockProcessingError,
-};
-pub use epoch_processable::{EpochProcessable, Error as EpochProcessingError};
-pub use slot_processable::{Error as SlotProcessingError, SlotProcessable};
+#[macro_use]
+mod macros;
+
+pub mod per_block_processing;
+pub mod per_epoch_processing;
+pub mod per_slot_processing;
+
+pub use per_block_processing::{
+    errors::{BlockInvalid, BlockProcessingError},
+    per_block_processing, per_block_processing_without_verifying_block_signature,
+};
+pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing};
+pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError};
24 eth2/state_processing/src/macros.rs Normal file
@ -0,0 +1,24 @@
macro_rules! verify {
    ($condition: expr, $result: expr) => {
        if !$condition {
            return Err(Error::Invalid($result));
        }
    };
}

macro_rules! invalid {
    ($result: expr) => {
        return Err(Error::Invalid($result));
    };
}

macro_rules! safe_add_assign {
    ($a: expr, $b: expr) => {
        $a = $a.saturating_add($b);
    };
}
macro_rules! safe_sub_assign {
    ($a: expr, $b: expr) => {
        $a = $a.saturating_sub($b);
    };
}
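
As a reading aid, a minimal sketch of how `verify!` and `invalid!` read at a call site; they are pulled into the other modules via the `#[macro_use] mod macros;` line added to lib.rs above. The local `Error`/`Invalid` enums here are stand-ins for whatever the including module aliases those names to, and the function itself is hypothetical.

#[derive(Debug, PartialEq)]
enum Invalid {
    StateSlotMismatch,
}

#[derive(Debug, PartialEq)]
enum Error {
    Invalid(Invalid),
}

// `verify!(cond, reason)` expands to an early `return Err(Error::Invalid(reason))`
// when `cond` is false, exactly as the macro body above spells out.
fn check_slot(block_slot: u64, state_slot: u64) -> Result<(), Error> {
    verify!(block_slot == state_slot, Invalid::StateSlotMismatch);
    Ok(())
}
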
386 eth2/state_processing/src/per_block_processing.rs Normal file
@ -0,0 +1,386 @@
use self::verify_proposer_slashing::verify_proposer_slashing;
use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
use hashing::hash;
use log::debug;
use ssz::{ssz_encode, SignedRoot, TreeHash};
use types::*;

pub use self::verify_attester_slashing::verify_attester_slashing;
pub use validate_attestation::{validate_attestation, validate_attestation_without_signature};
pub use verify_deposit::verify_deposit;
pub use verify_exit::verify_exit;
pub use verify_transfer::{execute_transfer, verify_transfer};

pub mod errors;
mod validate_attestation;
mod verify_attester_slashing;
mod verify_deposit;
mod verify_exit;
mod verify_proposer_slashing;
mod verify_slashable_attestation;
mod verify_transfer;

// Set to `true` to check the merkle proof that a deposit is in the eth1 deposit root.
//
// Presently disabled to make testing easier.
const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false;

/// Updates the state for a new block, whilst validating that the block is valid.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
pub fn per_block_processing(
    state: &mut BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
) -> Result<(), Error> {
    per_block_processing_signature_optional(state, block, true, spec)
}

/// Updates the state for a new block, whilst validating that the block is valid, without actually
/// checking the block proposer signature.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
pub fn per_block_processing_without_verifying_block_signature(
    state: &mut BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
) -> Result<(), Error> {
    per_block_processing_signature_optional(state, block, false, spec)
}

/// Updates the state for a new block, whilst validating that the block is valid, optionally
/// checking the block proposer signature.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.4.0
fn per_block_processing_signature_optional(
    mut state: &mut BeaconState,
    block: &BeaconBlock,
    should_verify_block_signature: bool,
    spec: &ChainSpec,
) -> Result<(), Error> {
    // Verify that `block.slot == state.slot`.
    verify!(block.slot == state.slot, Invalid::StateSlotMismatch);

    // Ensure the current epoch cache is built.
    state.build_epoch_cache(RelativeEpoch::Current, spec)?;

    if should_verify_block_signature {
        verify_block_signature(&state, &block, &spec)?;
    }
    process_randao(&mut state, &block, &spec)?;
    process_eth1_data(&mut state, &block.eth1_data)?;
    process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?;
    process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?;
    process_attestations(&mut state, &block.body.attestations[..], spec)?;
    process_deposits(&mut state, &block.body.deposits[..], spec)?;
    process_exits(&mut state, &block.body.voluntary_exits[..], spec)?;
    process_transfers(&mut state, &block.body.transfers[..], spec)?;

    debug!("per_block_processing complete.");

    Ok(())
}

/// Verifies the signature of a block.
///
/// Spec v0.4.0
pub fn verify_block_signature(
    state: &BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
) -> Result<(), Error> {
    let block_proposer =
        &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?];

    let proposal = Proposal {
        slot: block.slot,
        shard: spec.beacon_chain_shard_number,
        block_root: Hash256::from_slice(&block.signed_root()[..]),
        signature: block.signature.clone(),
    };
    let domain = spec.get_domain(
        block.slot.epoch(spec.slots_per_epoch),
        Domain::Proposal,
        &state.fork,
    );

    verify!(
        proposal
            .signature
            .verify(&proposal.signed_root()[..], domain, &block_proposer.pubkey),
        Invalid::BadSignature
    );

    Ok(())
}

/// Verifies the `randao_reveal` against the block's proposer pubkey and updates
/// `state.latest_randao_mixes`.
///
/// Spec v0.4.0
pub fn process_randao(
    state: &mut BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
) -> Result<(), Error> {
    // Let `proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)]`.
    let block_proposer =
        &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?];

    // Verify that `bls_verify(pubkey=proposer.pubkey,
    // message_hash=hash_tree_root(get_current_epoch(state)), signature=block.randao_reveal,
    // domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO))`.
    verify!(
        block.randao_reveal.verify(
            &state.current_epoch(spec).hash_tree_root()[..],
            spec.get_domain(
                block.slot.epoch(spec.slots_per_epoch),
                Domain::Randao,
                &state.fork
            ),
            &block_proposer.pubkey
        ),
        Invalid::BadRandaoSignature
    );

    // Update the state's RANDAO mix with the one revealed in the block.
    update_randao(state, &block.randao_reveal, spec)?;

    Ok(())
}

/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
///
/// Spec v0.4.0
pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> {
    // Either increment the eth1_data vote count, or add a new eth1_data.
    let matching_eth1_vote_index = state
        .eth1_data_votes
        .iter()
        .position(|vote| vote.eth1_data == *eth1_data);
    if let Some(index) = matching_eth1_vote_index {
        state.eth1_data_votes[index].vote_count += 1;
    } else {
        state.eth1_data_votes.push(Eth1DataVote {
            eth1_data: eth1_data.clone(),
            vote_count: 1,
        });
    }

    Ok(())
}

/// Updates the present randao mix.
///
/// Set `state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] =
/// xor(get_randao_mix(state, get_current_epoch(state)), hash(block.randao_reveal))`.
///
/// Spec v0.4.0
pub fn update_randao(
    state: &mut BeaconState,
    reveal: &Signature,
    spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
    let hashed_reveal = {
        let encoded_signature = ssz_encode(reveal);
        Hash256::from_slice(&hash(&encoded_signature[..])[..])
    };

    let current_epoch = state.slot.epoch(spec.slots_per_epoch);

    let current_mix = state
        .get_randao_mix(current_epoch, spec)
        .ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?;

    let new_mix = *current_mix ^ hashed_reveal;

    let index = current_epoch.as_usize() % spec.latest_randao_mixes_length;

    if index < state.latest_randao_mixes.len() {
        state.latest_randao_mixes[index] = new_mix;
        Ok(())
    } else {
        Err(BeaconStateError::InsufficientRandaoMixes)
    }
}

/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_proposer_slashings(
    state: &mut BeaconState,
    proposer_slashings: &[ProposerSlashing],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
        Invalid::MaxProposerSlashingsExceeded
    );
    for (i, proposer_slashing) in proposer_slashings.iter().enumerate() {
        verify_proposer_slashing(proposer_slashing, &state, spec)
            .map_err(|e| e.into_with_index(i))?;
        state.slash_validator(proposer_slashing.proposer_index as usize, spec)?;
    }

    Ok(())
}

/// Validates each `AttesterSlashing` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_attester_slashings(
    state: &mut BeaconState,
    attester_slashings: &[AttesterSlashing],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        attester_slashings.len() as u64 <= spec.max_attester_slashings,
        Invalid::MaxAttesterSlashingsExceed
    );
    for (i, attester_slashing) in attester_slashings.iter().enumerate() {
        let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec)
            .map_err(|e| e.into_with_index(i))?;
        for i in slashable_indices {
            state.slash_validator(i as usize, spec)?;
        }
    }

    Ok(())
}

/// Validates each `Attestation` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_attestations(
    state: &mut BeaconState,
    attestations: &[Attestation],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        attestations.len() as u64 <= spec.max_attestations,
        Invalid::MaxAttestationsExceeded
    );
    for (i, attestation) in attestations.iter().enumerate() {
        // Build the previous epoch cache only if required by an attestation.
        if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) {
            state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
        }

        validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?;

        let pending_attestation = PendingAttestation {
            data: attestation.data.clone(),
            aggregation_bitfield: attestation.aggregation_bitfield.clone(),
            custody_bitfield: attestation.custody_bitfield.clone(),
            inclusion_slot: state.slot,
        };
        state.latest_attestations.push(pending_attestation);
    }

    Ok(())
}

/// Validates each `Deposit` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_deposits(
    state: &mut BeaconState,
    deposits: &[Deposit],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        deposits.len() as u64 <= spec.max_deposits,
        Invalid::MaxDepositsExceeded
    );
    for (i, deposit) in deposits.iter().enumerate() {
        verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec)
            .map_err(|e| e.into_with_index(i))?;

        state
            .process_deposit(
                deposit.deposit_data.deposit_input.pubkey.clone(),
                deposit.deposit_data.amount,
                deposit
                    .deposit_data
                    .deposit_input
                    .proof_of_possession
                    .clone(),
                deposit.deposit_data.deposit_input.withdrawal_credentials,
                None,
                spec,
            )
            .map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?;

        state.deposit_index += 1;
    }

    Ok(())
}

/// Validates each `Exit` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_exits(
    state: &mut BeaconState,
    voluntary_exits: &[VoluntaryExit],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        voluntary_exits.len() as u64 <= spec.max_voluntary_exits,
        Invalid::MaxExitsExceeded
    );
    for (i, exit) in voluntary_exits.iter().enumerate() {
        verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?;

        state.initiate_validator_exit(exit.validator_index as usize);
    }

    Ok(())
}

/// Validates each `Transfer` and updates the state, short-circuiting on an invalid object.
///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure.
///
/// Spec v0.4.0
pub fn process_transfers(
    state: &mut BeaconState,
    transfers: &[Transfer],
    spec: &ChainSpec,
) -> Result<(), Error> {
    verify!(
        transfers.len() as u64 <= spec.max_transfers,
        Invalid::MaxTransfersExceed
    );
    for (i, transfer) in transfers.iter().enumerate() {
        verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?;
        execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?;
    }

    Ok(())
}
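
A hypothetical caller sketch, not part of this diff: how the two public entry points above might be selected by an importer that has already checked the proposer signature elsewhere. It assumes the crate is used under the name `state_processing` (matching the directory) and relies only on the signatures and re-exports shown in this commit.

use state_processing::{
    per_block_processing, per_block_processing_without_verifying_block_signature,
};
use types::{BeaconBlock, BeaconState, ChainSpec};

fn import_block(
    state: &mut BeaconState,
    block: &BeaconBlock,
    spec: &ChainSpec,
    signature_already_checked: bool,
) -> Result<(), String> {
    let result = if signature_already_checked {
        // Skips `verify_block_signature`, but runs every other check and state update.
        per_block_processing_without_verifying_block_signature(state, block, spec)
    } else {
        per_block_processing(state, block, spec)
    };
    // Both variants report the first invalid object via `BlockProcessingError`.
    result.map_err(|e| format!("block rejected: {:?}", e))
}
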
387 eth2/state_processing/src/per_block_processing/errors.rs Normal file
@ -0,0 +1,387 @@
|
use types::*;
|
||||||
|
|
||||||
|
macro_rules! impl_from_beacon_state_error {
|
||||||
|
($type: ident) => {
|
||||||
|
impl From<BeaconStateError> for $type {
|
||||||
|
fn from(e: BeaconStateError) -> $type {
|
||||||
|
$type::BeaconStateError(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! impl_into_with_index_with_beacon_error {
|
||||||
|
($error_type: ident, $invalid_type: ident) => {
|
||||||
|
impl IntoWithIndex<BlockProcessingError> for $error_type {
|
||||||
|
fn into_with_index(self, i: usize) -> BlockProcessingError {
|
||||||
|
match self {
|
||||||
|
$error_type::Invalid(e) => {
|
||||||
|
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
|
||||||
|
}
|
||||||
|
$error_type::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! impl_into_with_index_without_beacon_error {
|
||||||
|
($error_type: ident, $invalid_type: ident) => {
|
||||||
|
impl IntoWithIndex<BlockProcessingError> for $error_type {
|
||||||
|
fn into_with_index(self, i: usize) -> BlockProcessingError {
|
||||||
|
match self {
|
||||||
|
$error_type::Invalid(e) => {
|
||||||
|
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A conversion that consumes `self` and adds an `index` variable to resulting struct.
|
||||||
|
///
|
||||||
|
/// Used here to allow converting an error into an upstream error that points to the object that
|
||||||
|
/// caused the error. For example, pointing to the index of an attestation that caused the
|
||||||
|
/// `AttestationInvalid` error.
|
||||||
|
pub trait IntoWithIndex<T>: Sized {
|
||||||
|
fn into_with_index(self, index: usize) -> T;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Block Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum BlockProcessingError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(BlockInvalid),
|
||||||
|
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
|
||||||
|
BeaconStateError(BeaconStateError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_from_beacon_state_error!(BlockProcessingError);
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum BlockInvalid {
|
||||||
|
StateSlotMismatch,
|
||||||
|
BadSignature,
|
||||||
|
BadRandaoSignature,
|
||||||
|
MaxAttestationsExceeded,
|
||||||
|
MaxAttesterSlashingsExceed,
|
||||||
|
MaxProposerSlashingsExceeded,
|
||||||
|
MaxDepositsExceeded,
|
||||||
|
MaxExitsExceeded,
|
||||||
|
MaxTransfersExceed,
|
||||||
|
AttestationInvalid(usize, AttestationInvalid),
|
||||||
|
AttesterSlashingInvalid(usize, AttesterSlashingInvalid),
|
||||||
|
ProposerSlashingInvalid(usize, ProposerSlashingInvalid),
|
||||||
|
DepositInvalid(usize, DepositInvalid),
|
||||||
|
// TODO: merge this into the `DepositInvalid` error.
|
||||||
|
DepositProcessingFailed(usize),
|
||||||
|
ExitInvalid(usize, ExitInvalid),
|
||||||
|
TransferInvalid(usize, TransferInvalid),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Into<BlockProcessingError> for BlockInvalid {
|
||||||
|
fn into(self) -> BlockProcessingError {
|
||||||
|
BlockProcessingError::Invalid(self)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Attestation Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum AttestationValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(AttestationInvalid),
|
||||||
|
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
|
||||||
|
BeaconStateError(BeaconStateError),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum AttestationInvalid {
|
||||||
|
/// Attestation references a pre-genesis slot.
|
||||||
|
///
|
||||||
|
/// (genesis_slot, attestation_slot)
|
||||||
|
PreGenesis(Slot, Slot),
|
||||||
|
/// Attestation included before the inclusion delay.
|
||||||
|
///
|
||||||
|
/// (state_slot, inclusion_delay, attestation_slot)
|
||||||
|
IncludedTooEarly(Slot, u64, Slot),
|
||||||
|
/// Attestation slot is too far in the past to be included in a block.
|
||||||
|
///
|
||||||
|
/// (state_slot, attestation_slot)
|
||||||
|
IncludedTooLate(Slot, Slot),
|
||||||
|
/// Attestation justified epoch does not match the states current or previous justified epoch.
|
||||||
|
///
|
||||||
|
/// (attestation_justified_epoch, state_epoch, used_previous_epoch)
|
||||||
|
WrongJustifiedEpoch(Epoch, Epoch, bool),
|
||||||
|
/// Attestation justified epoch root does not match root known to the state.
|
||||||
|
///
|
||||||
|
/// (state_justified_root, attestation_justified_root)
|
||||||
|
WrongJustifiedRoot(Hash256, Hash256),
|
||||||
|
/// Attestation crosslink root does not match the state crosslink root for the attestations
|
||||||
|
/// slot.
|
||||||
|
BadLatestCrosslinkRoot,
|
||||||
|
/// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
|
||||||
|
CustodyBitfieldHasSetBits,
|
||||||
|
/// There are no set bits on the attestation -- an attestation must be signed by at least one
|
||||||
|
/// validator.
|
||||||
|
AggregationBitfieldIsEmpty,
|
||||||
|
/// The custody bitfield length is not the smallest possible size to represent the committee.
|
||||||
|
///
|
||||||
|
/// (committee_len, bitfield_len)
|
||||||
|
BadCustodyBitfieldLength(usize, usize),
|
||||||
|
/// The aggregation bitfield length is not the smallest possible size to represent the committee.
|
||||||
|
///
|
||||||
|
/// (committee_len, bitfield_len)
|
||||||
|
BadAggregationBitfieldLength(usize, usize),
|
||||||
|
/// There was no known committee for the given shard in the given slot.
|
||||||
|
///
|
||||||
|
/// (attestation_data_shard, attestation_data_slot)
|
||||||
|
NoCommitteeForShard(u64, Slot),
|
||||||
|
/// The attestation signature verification failed.
|
||||||
|
BadSignature,
|
||||||
|
/// The shard block root was not set to zero. This is a phase 0 requirement.
|
||||||
|
ShardBlockRootNotZero,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_from_beacon_state_error!(AttestationValidationError);
|
||||||
|
impl_into_with_index_with_beacon_error!(AttestationValidationError, AttestationInvalid);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `AttesterSlashing` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum AttesterSlashingValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(AttesterSlashingInvalid),
|
||||||
|
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
|
||||||
|
BeaconStateError(BeaconStateError),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum AttesterSlashingInvalid {
|
||||||
|
/// The attestation data is identical, an attestation cannot conflict with itself.
|
||||||
|
AttestationDataIdentical,
|
||||||
|
/// The attestations were not in conflict.
|
||||||
|
NotSlashable,
|
||||||
|
/// The first `SlashableAttestation` was invalid.
|
||||||
|
SlashableAttestation1Invalid(SlashableAttestationInvalid),
|
||||||
|
/// The second `SlashableAttestation` was invalid.
|
||||||
|
SlashableAttestation2Invalid(SlashableAttestationInvalid),
|
||||||
|
/// The validator index is unknown. One cannot slash one who does not exist.
|
||||||
|
UnknownValidator(u64),
|
||||||
|
/// There were no indices able to be slashed.
|
||||||
|
NoSlashableIndices,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_from_beacon_state_error!(AttesterSlashingValidationError);
|
||||||
|
impl_into_with_index_with_beacon_error!(AttesterSlashingValidationError, AttesterSlashingInvalid);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `SlashableAttestation` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum SlashableAttestationValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(SlashableAttestationInvalid),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum SlashableAttestationInvalid {
|
||||||
|
/// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
|
||||||
|
CustodyBitfieldHasSetBits,
|
||||||
|
/// No validator indices were specified.
|
||||||
|
NoValidatorIndices,
|
||||||
|
/// The validator indices were not in increasing order.
|
||||||
|
///
|
||||||
|
/// The error occurred between the given `index` and `index + 1`.
|
||||||
|
BadValidatorIndicesOrdering(usize),
|
||||||
|
/// The custody bitfield length is not the smallest possible size to represent the validators.
|
||||||
|
///
|
||||||
|
/// (validators_len, bitfield_len)
|
||||||
|
BadCustodyBitfieldLength(usize, usize),
|
||||||
|
/// The number of slashable indices exceeds the global maximum.
|
||||||
|
///
|
||||||
|
/// (max_indices, indices_given)
|
||||||
|
MaxIndicesExceed(usize, usize),
|
||||||
|
/// The validator index is unknown. One cannot slash one who does not exist.
|
||||||
|
UnknownValidator(u64),
|
||||||
|
/// The slashable attestation aggregate signature was not valid.
|
||||||
|
BadSignature,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Into<SlashableAttestationInvalid> for SlashableAttestationValidationError {
|
||||||
|
fn into(self) -> SlashableAttestationInvalid {
|
||||||
|
match self {
|
||||||
|
SlashableAttestationValidationError::Invalid(e) => e,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `ProposerSlashing` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum ProposerSlashingValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(ProposerSlashingInvalid),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum ProposerSlashingInvalid {
|
||||||
|
/// The proposer index is not a known validator.
|
||||||
|
ProposerUnknown(u64),
|
||||||
|
/// The two proposals have different slots.
|
||||||
|
///
|
||||||
|
/// (proposal_1_slot, proposal_2_slot)
|
||||||
|
ProposalSlotMismatch(Slot, Slot),
|
||||||
|
/// The two proposals have different shards.
|
||||||
|
///
|
||||||
|
/// (proposal_1_shard, proposal_2_shard)
|
||||||
|
ProposalShardMismatch(u64, u64),
|
||||||
|
/// The two proposals have different block roots.
|
||||||
|
///
|
||||||
|
/// (proposal_1_root, proposal_2_root)
|
||||||
|
ProposalBlockRootMismatch(Hash256, Hash256),
|
||||||
|
/// The specified proposer has already been slashed.
|
||||||
|
ProposerAlreadySlashed,
|
||||||
|
/// The first proposal signature was invalid.
|
||||||
|
BadProposal1Signature,
|
||||||
|
/// The second proposal signature was invalid.
|
||||||
|
BadProposal2Signature,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_into_with_index_without_beacon_error!(
|
||||||
|
ProposerSlashingValidationError,
|
||||||
|
ProposerSlashingInvalid
|
||||||
|
);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `Deposit` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum DepositValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(DepositInvalid),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum DepositInvalid {
|
||||||
|
/// The deposit index does not match the state index.
|
||||||
|
///
|
||||||
|
/// (state_index, deposit_index)
|
||||||
|
BadIndex(u64, u64),
|
||||||
|
/// The specified `branch` and `index` did not form a valid proof that the deposit is included
|
||||||
|
/// in the eth1 deposit root.
|
||||||
|
BadMerkleProof,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_into_with_index_without_beacon_error!(DepositValidationError, DepositInvalid);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `Exit` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum ExitValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(ExitInvalid),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum ExitInvalid {
|
||||||
|
/// The specified validator is not in the state's validator registry.
|
||||||
|
ValidatorUnknown(u64),
|
||||||
|
/// The specified validator has already initiated (or completed) an exit.
AlreadyExited,
|
||||||
|
/// The exit is for a future epoch.
|
||||||
|
///
|
||||||
|
/// (state_epoch, exit_epoch)
|
||||||
|
FutureEpoch(Epoch, Epoch),
|
||||||
|
/// The exit signature was not signed by the validator.
|
||||||
|
BadSignature,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_into_with_index_without_beacon_error!(ExitValidationError, ExitInvalid);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* `Transfer` Validation
|
||||||
|
*/
|
||||||
|
|
||||||
|
/// The object is invalid or validation failed.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum TransferValidationError {
|
||||||
|
/// Validation completed successfully and the object is invalid.
|
||||||
|
Invalid(TransferInvalid),
|
||||||
|
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
|
||||||
|
BeaconStateError(BeaconStateError),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Describes why an object is invalid.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum TransferInvalid {
|
||||||
|
/// The validator indicated by `transfer.from` is unknown.
|
||||||
|
FromValidatorUnknown(u64),
|
||||||
|
/// The validator indicated by `transfer.to` is unknown.
|
||||||
|
ToValidatorUnknown(u64),
|
||||||
|
/// The balance of `transfer.from` is insufficient.
|
||||||
|
///
|
||||||
|
/// (required, available)
|
||||||
|
FromBalanceInsufficient(u64, u64),
|
||||||
|
/// Adding `transfer.fee` to `transfer.amount` causes an overflow.
|
||||||
|
///
|
||||||
|
/// (transfer_fee, transfer_amount)
|
||||||
|
FeeOverflow(u64, u64),
|
||||||
|
/// This transfer would result in the `transfer.from` account having `0 < balance <
|
||||||
|
/// min_deposit_amount`
|
||||||
|
///
|
||||||
|
/// (resulting_amount, min_deposit_amount)
|
||||||
|
InvalidResultingFromBalance(u64, u64),
|
||||||
|
/// The state slot does not match `transfer.slot`.
|
||||||
|
///
|
||||||
|
/// (state_slot, transfer_slot)
|
||||||
|
StateSlotMismatch(Slot, Slot),
|
||||||
|
/// The `transfer.from` validator has been activated and is not withdrawable.
|
||||||
|
///
|
||||||
|
/// (from_validator)
|
||||||
|
FromValidatorIneligableForTransfer(u64),
|
||||||
|
/// The validator's withdrawal credentials do not match those generated from `transfer.pubkey`.
|
||||||
|
///
|
||||||
|
/// (state_credentials, transfer_pubkey_credentials)
|
||||||
|
WithdrawalCredentialsMismatch(Hash256, Hash256),
|
||||||
|
/// The transfer was not signed by `transfer.pubkey`.
|
||||||
|
BadSignature,
|
||||||
|
/// Overflow when adding to `transfer.to` balance.
|
||||||
|
///
|
||||||
|
/// (to_balance, transfer_amount)
|
||||||
|
ToBalanceOverflow(u64, u64),
|
||||||
|
/// Overflow when adding to beacon proposer balance.
|
||||||
|
///
|
||||||
|
/// (proposer_balance, transfer_fee)
|
||||||
|
ProposerBalanceOverflow(u64, u64),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl_from_beacon_state_error!(TransferValidationError);
|
||||||
|
impl_into_with_index_with_beacon_error!(TransferValidationError, TransferInvalid);
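The `verify!` and `invalid!` macros used throughout the modules below, and the `impl_*` macros invoked above, are defined elsewhere in the crate and are not part of this diff. A minimal sketch of the pattern they follow (the expansions here are assumptions for illustration, not the crate's actual definitions):

// Sketch only -- assumed expansions, not taken from this diff.
macro_rules! verify {
    ($condition: expr, $result: expr) => {
        if !$condition {
            return Err(Error::Invalid($result));
        }
    };
}

macro_rules! invalid {
    ($result: expr) => {
        return Err(Error::Invalid($result));
    };
}

macro_rules! impl_from_beacon_state_error {
    ($type: ident) => {
        impl From<BeaconStateError> for $type {
            fn from(e: BeaconStateError) -> $type {
                $type::BeaconStateError(e)
            }
        }
    };
}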
|
@ -0,0 +1,255 @@
|
|||||||
|
use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error};
|
||||||
|
use ssz::TreeHash;
|
||||||
|
use types::beacon_state::helpers::*;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
|
||||||
|
/// given state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn validate_attestation(
|
||||||
|
state: &BeaconState,
|
||||||
|
attestation: &Attestation,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
validate_attestation_signature_optional(state, attestation, spec, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
|
||||||
|
/// given state, without validating the aggregate signature.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn validate_attestation_without_signature(
|
||||||
|
state: &BeaconState,
|
||||||
|
attestation: &Attestation,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
validate_attestation_signature_optional(state, attestation, spec, false)
|
||||||
|
}
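For orientation, a block-processing caller would typically drive these two entry points roughly as follows; the wrapping function and its name are assumptions for illustration, only `validate_attestation` itself comes from this diff.

// Hypothetical call site for the validation functions above.
fn process_block_attestations(
    state: &BeaconState,
    attestations: &[Attestation],
    spec: &ChainSpec,
) -> Result<(), Error> {
    for attestation in attestations {
        // Full check, including the aggregate signature.
        validate_attestation(state, attestation, spec)?;
    }
    Ok(())
}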
|
||||||
|
|
||||||
|
/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
|
||||||
|
/// given state, optionally validating the aggregate signature.
|
||||||
|
///
|
||||||
|
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn validate_attestation_signature_optional(
|
||||||
|
state: &BeaconState,
|
||||||
|
attestation: &Attestation,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
verify_signature: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
// Verify that `attestation.data.slot >= GENESIS_SLOT`.
|
||||||
|
verify!(
|
||||||
|
attestation.data.slot >= spec.genesis_slot,
|
||||||
|
Invalid::PreGenesis(spec.genesis_slot, attestation.data.slot)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`.
|
||||||
|
verify!(
|
||||||
|
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
|
||||||
|
Invalid::IncludedTooEarly(
|
||||||
|
state.slot,
|
||||||
|
spec.min_attestation_inclusion_delay,
|
||||||
|
attestation.data.slot
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`.
|
||||||
|
verify!(
|
||||||
|
state.slot < attestation.data.slot + spec.slots_per_epoch,
|
||||||
|
Invalid::IncludedTooLate(state.slot, attestation.data.slot)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch` if
|
||||||
|
// `slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state)`, else
|
||||||
|
// `state.previous_justified_epoch`.
|
||||||
|
if (attestation.data.slot + 1).epoch(spec.slots_per_epoch) >= state.current_epoch(spec) {
|
||||||
|
verify!(
|
||||||
|
attestation.data.justified_epoch == state.justified_epoch,
|
||||||
|
Invalid::WrongJustifiedEpoch(
|
||||||
|
attestation.data.justified_epoch,
|
||||||
|
state.justified_epoch,
|
||||||
|
false
|
||||||
|
)
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
verify!(
|
||||||
|
attestation.data.justified_epoch == state.previous_justified_epoch,
|
||||||
|
Invalid::WrongJustifiedEpoch(
|
||||||
|
attestation.data.justified_epoch,
|
||||||
|
state.previous_justified_epoch,
|
||||||
|
true
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state,
|
||||||
|
// get_epoch_start_slot(attestation.data.justified_epoch))`.
|
||||||
|
let justified_block_root = *state
|
||||||
|
.get_block_root(
|
||||||
|
attestation
|
||||||
|
.data
|
||||||
|
.justified_epoch
|
||||||
|
.start_slot(spec.slots_per_epoch),
|
||||||
|
&spec,
|
||||||
|
)
|
||||||
|
.ok_or(BeaconStateError::InsufficientBlockRoots)?;
|
||||||
|
verify!(
|
||||||
|
attestation.data.justified_block_root == justified_block_root,
|
||||||
|
Invalid::WrongJustifiedRoot(justified_block_root, attestation.data.justified_block_root)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify that either:
|
||||||
|
//
|
||||||
|
// (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`,
|
||||||
|
//
|
||||||
|
// (ii) `state.latest_crosslinks[attestation.data.shard] ==
|
||||||
|
// Crosslink(crosslink_data_root=attestation.data.crosslink_data_root,
|
||||||
|
// epoch=slot_to_epoch(attestation.data.slot))`.
|
||||||
|
let potential_crosslink = Crosslink {
|
||||||
|
crosslink_data_root: attestation.data.crosslink_data_root,
|
||||||
|
epoch: attestation.data.slot.epoch(spec.slots_per_epoch),
|
||||||
|
};
|
||||||
|
verify!(
|
||||||
|
(attestation.data.latest_crosslink
|
||||||
|
== state.latest_crosslinks[attestation.data.shard as usize])
|
||||||
|
| (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink),
|
||||||
|
Invalid::BadLatestCrosslinkRoot
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get the committee for this attestation
|
||||||
|
let (committee, _shard) = state
|
||||||
|
.get_crosslink_committees_at_slot(attestation.data.slot, spec)?
|
||||||
|
.iter()
|
||||||
|
.find(|(_committee, shard)| *shard == attestation.data.shard)
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Error::Invalid(Invalid::NoCommitteeForShard(
|
||||||
|
attestation.data.shard,
|
||||||
|
attestation.data.slot,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// Custody bitfield is all zeros (phase 0 requirement).
|
||||||
|
verify!(
|
||||||
|
attestation.custody_bitfield.num_set_bits() == 0,
|
||||||
|
Invalid::CustodyBitfieldHasSetBits
|
||||||
|
);
|
||||||
|
// Custody bitfield length is correct.
|
||||||
|
verify!(
|
||||||
|
verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
|
||||||
|
Invalid::BadCustodyBitfieldLength(committee.len(), attestation.custody_bitfield.len())
|
||||||
|
);
|
||||||
|
// Aggregation bitfield isn't empty.
|
||||||
|
verify!(
|
||||||
|
attestation.aggregation_bitfield.num_set_bits() != 0,
|
||||||
|
Invalid::AggregationBitfieldIsEmpty
|
||||||
|
);
|
||||||
|
// Aggregation bitfield length is correct.
|
||||||
|
verify!(
|
||||||
|
verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()),
|
||||||
|
Invalid::BadAggregationBitfieldLength(
|
||||||
|
committee.len(),
|
||||||
|
attestation.aggregation_bitfield.len()
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
if verify_signature {
|
||||||
|
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
|
||||||
|
verify!(
|
||||||
|
verify_attestation_signature(
|
||||||
|
state,
|
||||||
|
committee,
|
||||||
|
attestation_epoch,
|
||||||
|
&attestation.custody_bitfield,
|
||||||
|
&attestation.data,
|
||||||
|
&attestation.aggregate_signature,
|
||||||
|
spec
|
||||||
|
),
|
||||||
|
Invalid::BadSignature
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
|
||||||
|
verify!(
|
||||||
|
attestation.data.crosslink_data_root == spec.zero_hash,
|
||||||
|
Invalid::ShardBlockRootNotZero
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verifies an aggregate signature for some given `AttestationData`, returning `true` if the
|
||||||
|
/// `aggregate_signature` is valid.
|
||||||
|
///
|
||||||
|
/// Returns `false` if:
|
||||||
|
/// - `aggregate_signature` was not signed correctly.
|
||||||
|
/// - `custody_bitfield` does not have a bit for each index of `committee`.
|
||||||
|
/// - A `validator_index` in `committee` is not in `state.validator_registry`.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn verify_attestation_signature(
|
||||||
|
state: &BeaconState,
|
||||||
|
committee: &[usize],
|
||||||
|
attestation_epoch: Epoch,
|
||||||
|
custody_bitfield: &Bitfield,
|
||||||
|
attestation_data: &AttestationData,
|
||||||
|
aggregate_signature: &AggregateSignature,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> bool {
|
||||||
|
let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2];
|
||||||
|
let mut message_exists = vec![false; 2];
|
||||||
|
|
||||||
|
for (i, v) in committee.iter().enumerate() {
|
||||||
|
let custody_bit = match custody_bitfield.get(i) {
|
||||||
|
Ok(bit) => bit,
|
||||||
|
// Invalidate the signature if custody_bitfield.len() < committee.len()
|
||||||
|
Err(_) => return false,
|
||||||
|
};
|
||||||
|
|
||||||
|
message_exists[custody_bit as usize] = true;
|
||||||
|
|
||||||
|
match state.validator_registry.get(*v as usize) {
|
||||||
|
Some(validator) => {
|
||||||
|
aggregate_pubs[custody_bit as usize].add(&validator.pubkey);
|
||||||
|
}
|
||||||
|
// Invalidate signature if validator index is unknown.
|
||||||
|
None => return false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message when custody bitfield is `false`
|
||||||
|
let message_0 = AttestationDataAndCustodyBit {
|
||||||
|
data: attestation_data.clone(),
|
||||||
|
custody_bit: false,
|
||||||
|
}
|
||||||
|
.hash_tree_root();
|
||||||
|
|
||||||
|
// Message when custody bitfield is `true`
|
||||||
|
let message_1 = AttestationDataAndCustodyBit {
|
||||||
|
data: attestation_data.clone(),
|
||||||
|
custody_bit: true,
|
||||||
|
}
|
||||||
|
.hash_tree_root();
|
||||||
|
|
||||||
|
let mut messages = vec![];
|
||||||
|
let mut keys = vec![];
|
||||||
|
|
||||||
|
// If any validator signed a message with a `false` custody bit.
|
||||||
|
if message_exists[0] {
|
||||||
|
messages.push(&message_0[..]);
|
||||||
|
keys.push(&aggregate_pubs[0]);
|
||||||
|
}
|
||||||
|
// If any validator signed a message with a `true` custody bit.
|
||||||
|
if message_exists[1] {
|
||||||
|
messages.push(&message_1[..]);
|
||||||
|
keys.push(&aggregate_pubs[1]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork);
|
||||||
|
|
||||||
|
aggregate_signature.verify_multiple(&messages[..], domain, &keys[..])
|
||||||
|
}
|
@ -0,0 +1,49 @@
|
|||||||
|
use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error};
|
||||||
|
use super::verify_slashable_attestation::verify_slashable_attestation;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if an `AttesterSlashing` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_attester_slashing(
|
||||||
|
state: &BeaconState,
|
||||||
|
attester_slashing: &AttesterSlashing,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<Vec<u64>, Error> {
|
||||||
|
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
|
||||||
|
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
slashable_attestation_1.data != slashable_attestation_2.data,
|
||||||
|
Invalid::AttestationDataIdentical
|
||||||
|
);
|
||||||
|
verify!(
|
||||||
|
slashable_attestation_1.is_double_vote(slashable_attestation_2, spec)
|
||||||
|
| slashable_attestation_1.is_surround_vote(slashable_attestation_2, spec),
|
||||||
|
Invalid::NotSlashable
|
||||||
|
);
|
||||||
|
|
||||||
|
verify_slashable_attestation(state, &slashable_attestation_1, spec)
|
||||||
|
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?;
|
||||||
|
verify_slashable_attestation(state, &slashable_attestation_2, spec)
|
||||||
|
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?;
|
||||||
|
|
||||||
|
let mut slashable_indices = vec![];
|
||||||
|
for i in &slashable_attestation_1.validator_indices {
|
||||||
|
let validator = state
|
||||||
|
.validator_registry
|
||||||
|
.get(*i as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;
|
||||||
|
|
||||||
|
if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed {
|
||||||
|
slashable_indices.push(*i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices);
|
||||||
|
|
||||||
|
Ok(slashable_indices)
|
||||||
|
}
|
@ -0,0 +1,73 @@
|
|||||||
|
use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
|
||||||
|
use hashing::hash;
|
||||||
|
use merkle_proof::verify_merkle_proof;
|
||||||
|
use ssz::ssz_encode;
|
||||||
|
use ssz_derive::Encode;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Note: this function is incomplete.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_deposit(
|
||||||
|
state: &BeaconState,
|
||||||
|
deposit: &Deposit,
|
||||||
|
verify_merkle_branch: bool,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
verify!(
|
||||||
|
deposit.index == state.deposit_index,
|
||||||
|
Invalid::BadIndex(state.deposit_index, deposit.index)
|
||||||
|
);
|
||||||
|
|
||||||
|
if verify_merkle_branch {
|
||||||
|
verify!(
|
||||||
|
verify_deposit_merkle_proof(state, deposit, spec),
|
||||||
|
Invalid::BadMerkleProof
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verify that a deposit is included in the state's eth1 deposit root.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool {
|
||||||
|
let leaf = hash(&get_serialized_deposit_data(deposit));
|
||||||
|
verify_merkle_proof(
|
||||||
|
Hash256::from_slice(&leaf),
|
||||||
|
&deposit.branch,
|
||||||
|
spec.deposit_contract_tree_depth as usize,
|
||||||
|
deposit.index as usize,
|
||||||
|
state.latest_eth1_data.deposit_root,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper struct for easily getting the serialized data generated by the deposit contract.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
#[derive(Encode)]
|
||||||
|
struct SerializedDepositData {
|
||||||
|
amount: u64,
|
||||||
|
timestamp: u64,
|
||||||
|
input: DepositInput,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the serialized data generated by the deposit contract that is used to generate the
|
||||||
|
/// merkle proof.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn get_serialized_deposit_data(deposit: &Deposit) -> Vec<u8> {
|
||||||
|
let serialized_deposit_data = SerializedDepositData {
|
||||||
|
amount: deposit.deposit_data.amount,
|
||||||
|
timestamp: deposit.deposit_data.timestamp,
|
||||||
|
input: deposit.deposit_data.deposit_input.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
ssz_encode(&serialized_deposit_data)
|
||||||
|
}
|
@ -0,0 +1,42 @@
|
|||||||
|
use super::errors::{ExitInvalid as Invalid, ExitValidationError as Error};
|
||||||
|
use ssz::SignedRoot;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if an `Exit` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_exit(
|
||||||
|
state: &BeaconState,
|
||||||
|
exit: &VoluntaryExit,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let validator = state
|
||||||
|
.validator_registry
|
||||||
|
.get(exit.validator_index as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?;
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
validator.exit_epoch
|
||||||
|
> state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec),
|
||||||
|
Invalid::AlreadyExited
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
state.current_epoch(spec) >= exit.epoch,
|
||||||
|
Invalid::FutureEpoch(state.current_epoch(spec), exit.epoch)
|
||||||
|
);
|
||||||
|
|
||||||
|
let message = exit.signed_root();
|
||||||
|
let domain = spec.get_domain(exit.epoch, Domain::Exit, &state.fork);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
exit.signature
|
||||||
|
.verify(&message[..], domain, &validator.pubkey),
|
||||||
|
Invalid::BadSignature
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -0,0 +1,87 @@
|
|||||||
|
use super::errors::{ProposerSlashingInvalid as Invalid, ProposerSlashingValidationError as Error};
|
||||||
|
use ssz::SignedRoot;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if a `ProposerSlashing` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_proposer_slashing(
|
||||||
|
proposer_slashing: &ProposerSlashing,
|
||||||
|
state: &BeaconState,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let proposer = state
|
||||||
|
.validator_registry
|
||||||
|
.get(proposer_slashing.proposer_index as usize)
|
||||||
|
.ok_or_else(|| {
|
||||||
|
Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot,
|
||||||
|
Invalid::ProposalSlotMismatch(
|
||||||
|
proposer_slashing.proposal_1.slot,
|
||||||
|
proposer_slashing.proposal_2.slot
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard,
|
||||||
|
Invalid::ProposalShardMismatch(
|
||||||
|
proposer_slashing.proposal_1.shard,
|
||||||
|
proposer_slashing.proposal_2.shard
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root,
|
||||||
|
Invalid::ProposalBlockRootMismatch(
|
||||||
|
proposer_slashing.proposal_1.block_root,
|
||||||
|
proposer_slashing.proposal_2.block_root
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
verify_proposal_signature(
|
||||||
|
&proposer_slashing.proposal_1,
|
||||||
|
&proposer.pubkey,
|
||||||
|
&state.fork,
|
||||||
|
spec
|
||||||
|
),
|
||||||
|
Invalid::BadProposal1Signature
|
||||||
|
);
|
||||||
|
verify!(
|
||||||
|
verify_proposal_signature(
|
||||||
|
&proposer_slashing.proposal_2,
|
||||||
|
&proposer.pubkey,
|
||||||
|
&state.fork,
|
||||||
|
spec
|
||||||
|
),
|
||||||
|
Invalid::BadProposal2Signature
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verifies the signature of a proposal.
|
||||||
|
///
|
||||||
|
/// Returns `true` if the signature is valid.
|
||||||
|
fn verify_proposal_signature(
|
||||||
|
proposal: &Proposal,
|
||||||
|
pubkey: &PublicKey,
|
||||||
|
fork: &Fork,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> bool {
|
||||||
|
let message = proposal.signed_root();
|
||||||
|
let domain = spec.get_domain(
|
||||||
|
proposal.slot.epoch(spec.slots_per_epoch),
|
||||||
|
Domain::Proposal,
|
||||||
|
fork,
|
||||||
|
);
|
||||||
|
proposal.signature.verify(&message[..], domain, pubkey)
|
||||||
|
}
|
@ -0,0 +1,112 @@
|
|||||||
|
use super::errors::{
|
||||||
|
SlashableAttestationInvalid as Invalid, SlashableAttestationValidationError as Error,
|
||||||
|
};
|
||||||
|
use ssz::TreeHash;
|
||||||
|
use types::beacon_state::helpers::verify_bitfield_length;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if a `SlashableAttestation` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_slashable_attestation(
|
||||||
|
state: &BeaconState,
|
||||||
|
slashable_attestation: &SlashableAttestation,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
if slashable_attestation.custody_bitfield.num_set_bits() > 0 {
|
||||||
|
invalid!(Invalid::CustodyBitfieldHasSetBits);
|
||||||
|
}
|
||||||
|
|
||||||
|
if slashable_attestation.validator_indices.is_empty() {
|
||||||
|
invalid!(Invalid::NoValidatorIndices);
|
||||||
|
}
|
||||||
|
|
||||||
|
for i in 0..(slashable_attestation.validator_indices.len() - 1) {
|
||||||
|
if slashable_attestation.validator_indices[i]
|
||||||
|
>= slashable_attestation.validator_indices[i + 1]
|
||||||
|
{
|
||||||
|
invalid!(Invalid::BadValidatorIndicesOrdering(i));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !verify_bitfield_length(
|
||||||
|
&slashable_attestation.custody_bitfield,
|
||||||
|
slashable_attestation.validator_indices.len(),
|
||||||
|
) {
|
||||||
|
invalid!(Invalid::BadCustodyBitfieldLength(
|
||||||
|
slashable_attestation.validator_indices.len(),
|
||||||
|
slashable_attestation.custody_bitfield.len()
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if slashable_attestation.validator_indices.len() > spec.max_indices_per_slashable_vote as usize
|
||||||
|
{
|
||||||
|
invalid!(Invalid::MaxIndicesExceed(
|
||||||
|
spec.max_indices_per_slashable_vote as usize,
|
||||||
|
slashable_attestation.validator_indices.len()
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: this signature verification could likely be replaced with:
|
||||||
|
//
|
||||||
|
// super::validate_attestation::validate_attestation_signature(..)
|
||||||
|
|
||||||
|
let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2];
|
||||||
|
let mut message_exists = vec![false; 2];
|
||||||
|
|
||||||
|
for (i, v) in slashable_attestation.validator_indices.iter().enumerate() {
|
||||||
|
let custody_bit = match slashable_attestation.custody_bitfield.get(i) {
|
||||||
|
Ok(bit) => bit,
|
||||||
|
Err(_) => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
message_exists[custody_bit as usize] = true;
|
||||||
|
|
||||||
|
match state.validator_registry.get(*v as usize) {
|
||||||
|
Some(validator) => {
|
||||||
|
aggregate_pubs[custody_bit as usize].add(&validator.pubkey);
|
||||||
|
}
|
||||||
|
None => invalid!(Invalid::UnknownValidator(*v)),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
let message_0 = AttestationDataAndCustodyBit {
|
||||||
|
data: slashable_attestation.data.clone(),
|
||||||
|
custody_bit: false,
|
||||||
|
}
|
||||||
|
.hash_tree_root();
|
||||||
|
let message_1 = AttestationDataAndCustodyBit {
|
||||||
|
data: slashable_attestation.data.clone(),
|
||||||
|
custody_bit: true,
|
||||||
|
}
|
||||||
|
.hash_tree_root();
|
||||||
|
|
||||||
|
let mut messages = vec![];
|
||||||
|
let mut keys = vec![];
|
||||||
|
|
||||||
|
if message_exists[0] {
|
||||||
|
messages.push(&message_0[..]);
|
||||||
|
keys.push(&aggregate_pubs[0]);
|
||||||
|
}
|
||||||
|
if message_exists[1] {
|
||||||
|
messages.push(&message_1[..]);
|
||||||
|
keys.push(&aggregate_pubs[1]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let domain = {
|
||||||
|
let epoch = slashable_attestation.data.slot.epoch(spec.slots_per_epoch);
|
||||||
|
spec.get_domain(epoch, Domain::Attestation, &state.fork)
|
||||||
|
};
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
slashable_attestation
|
||||||
|
.aggregate_signature
|
||||||
|
.verify_multiple(&messages[..], domain, &keys[..]),
|
||||||
|
Invalid::BadSignature
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -0,0 +1,135 @@
|
|||||||
|
use super::errors::{TransferInvalid as Invalid, TransferValidationError as Error};
|
||||||
|
use bls::get_withdrawal_credentials;
|
||||||
|
use ssz::SignedRoot;
|
||||||
|
use types::*;
|
||||||
|
|
||||||
|
/// Indicates if a `Transfer` is valid to be included in a block in the current epoch of the given
|
||||||
|
/// state.
|
||||||
|
///
|
||||||
|
/// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity.
|
||||||
|
///
|
||||||
|
/// Note: this function is incomplete.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn verify_transfer(
|
||||||
|
state: &BeaconState,
|
||||||
|
transfer: &Transfer,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let from_balance = *state
|
||||||
|
.validator_balances
|
||||||
|
.get(transfer.from as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
|
||||||
|
|
||||||
|
let total_amount = transfer
|
||||||
|
.amount
|
||||||
|
.checked_add(transfer.fee)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
from_balance >= transfer.amount,
|
||||||
|
Invalid::FromBalanceInsufficient(transfer.amount, from_balance)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
from_balance >= transfer.fee,
|
||||||
|
Invalid::FromBalanceInsufficient(transfer.fee, from_balance)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
(from_balance == total_amount)
|
||||||
|
|| (from_balance >= (total_amount + spec.min_deposit_amount)),
|
||||||
|
Invalid::InvalidResultingFromBalance(from_balance - total_amount, spec.min_deposit_amount)
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
state.slot == transfer.slot,
|
||||||
|
Invalid::StateSlotMismatch(state.slot, transfer.slot)
|
||||||
|
);
|
||||||
|
|
||||||
|
let from_validator = state
|
||||||
|
.validator_registry
|
||||||
|
.get(transfer.from as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
|
||||||
|
let epoch = state.slot.epoch(spec.slots_per_epoch);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
from_validator.is_withdrawable_at(epoch)
|
||||||
|
|| from_validator.activation_epoch == spec.far_future_epoch,
|
||||||
|
Invalid::FromValidatorIneligableForTransfer(transfer.from)
|
||||||
|
);
|
||||||
|
|
||||||
|
let transfer_withdrawal_credentials = Hash256::from_slice(
|
||||||
|
&get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..],
|
||||||
|
);
|
||||||
|
verify!(
|
||||||
|
from_validator.withdrawal_credentials == transfer_withdrawal_credentials,
|
||||||
|
Invalid::WithdrawalCredentialsMismatch(
|
||||||
|
from_validator.withdrawal_credentials,
|
||||||
|
transfer_withdrawal_credentials
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
let message = transfer.signed_root();
|
||||||
|
let domain = spec.get_domain(
|
||||||
|
transfer.slot.epoch(spec.slots_per_epoch),
|
||||||
|
Domain::Transfer,
|
||||||
|
&state.fork,
|
||||||
|
);
|
||||||
|
|
||||||
|
verify!(
|
||||||
|
transfer
|
||||||
|
.signature
|
||||||
|
.verify(&message[..], domain, &transfer.pubkey),
|
||||||
|
Invalid::BadSignature
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Executes a transfer on the state.
|
||||||
|
///
|
||||||
|
/// Does not check that the transfer is valid, however checks for overflow in all actions.
|
||||||
|
///
|
||||||
|
/// Spec v0.4.0
|
||||||
|
pub fn execute_transfer(
|
||||||
|
state: &mut BeaconState,
|
||||||
|
transfer: &Transfer,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let from_balance = *state
|
||||||
|
.validator_balances
|
||||||
|
.get(transfer.from as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?;
|
||||||
|
let to_balance = *state
|
||||||
|
.validator_balances
|
||||||
|
.get(transfer.to as usize)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.to)))?;
|
||||||
|
|
||||||
|
let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?;
|
||||||
|
let proposer_balance = state.validator_balances[proposer_index];
|
||||||
|
|
||||||
|
let total_amount = transfer
|
||||||
|
.amount
|
||||||
|
.checked_add(transfer.fee)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
|
||||||
|
|
||||||
|
state.validator_balances[transfer.from as usize] =
|
||||||
|
from_balance.checked_sub(total_amount).ok_or_else(|| {
|
||||||
|
Error::Invalid(Invalid::FromBalanceInsufficient(total_amount, from_balance))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
state.validator_balances[transfer.to as usize] = to_balance
|
||||||
|
.checked_add(transfer.amount)
|
||||||
|
.ok_or_else(|| Error::Invalid(Invalid::ToBalanceOverflow(to_balance, transfer.amount)))?;
|
||||||
|
|
||||||
|
state.validator_balances[proposer_index] =
|
||||||
|
proposer_balance.checked_add(transfer.fee).ok_or_else(|| {
|
||||||
|
Error::Invalid(Invalid::ProposerBalanceOverflow(
|
||||||
|
proposer_balance,
|
||||||
|
transfer.fee,
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
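As a small aside on the overflow handling above, `checked_add`/`checked_sub` return `None` on overflow or underflow, which the code maps into the corresponding `TransferInvalid` variants. An illustrative check (not part of the original diff):

#[test]
fn checked_arithmetic_example() {
    // Overflow and underflow are reported as `None` rather than panicking or wrapping.
    assert_eq!(u64::max_value().checked_add(1), None);
    assert_eq!(0u64.checked_sub(1), None);
    assert_eq!(1u64.checked_add(2), Some(3));
}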
|
491 eth2/state_processing/src/per_epoch_processing.rs (new file)
@ -0,0 +1,491 @@
|
|||||||
|
use attester_sets::AttesterSets;
|
||||||
|
use errors::EpochProcessingError as Error;
|
||||||
|
use inclusion_distance::{inclusion_distance, inclusion_slot};
|
||||||
|
use integer_sqrt::IntegerSquareRoot;
|
||||||
|
use log::debug;
|
||||||
|
use rayon::prelude::*;
|
||||||
|
use ssz::TreeHash;
|
||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
use std::iter::FromIterator;
|
||||||
|
use types::{validator_registry::get_active_validator_indices, *};
|
||||||
|
use winning_root::{winning_root, WinningRoot};
|
||||||
|
|
||||||
|
pub mod attester_sets;
|
||||||
|
pub mod errors;
|
||||||
|
pub mod inclusion_distance;
|
||||||
|
pub mod tests;
|
||||||
|
pub mod winning_root;
|
||||||
|
|
||||||
|
pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
||||||
|
let current_epoch = state.current_epoch(spec);
|
||||||
|
let previous_epoch = state.previous_epoch(spec);
|
||||||
|
let next_epoch = state.next_epoch(spec);
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Starting per-epoch processing on epoch {}...",
|
||||||
|
state.current_epoch(spec)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Ensure all of the caches are built.
|
||||||
|
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
|
||||||
|
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
|
||||||
|
state.build_epoch_cache(RelativeEpoch::Next, spec)?;
|
||||||
|
|
||||||
|
let attesters = AttesterSets::new(&state, spec)?;
|
||||||
|
|
||||||
|
let active_validator_indices = get_active_validator_indices(
|
||||||
|
&state.validator_registry,
|
||||||
|
state.slot.epoch(spec.slots_per_epoch),
|
||||||
|
);
|
||||||
|
|
||||||
|
let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec);
|
||||||
|
let previous_total_balance = state.get_total_balance(
|
||||||
|
&get_active_validator_indices(&state.validator_registry, previous_epoch)[..],
|
||||||
|
spec,
|
||||||
|
);
|
||||||
|
|
||||||
|
process_eth1_data(state, spec);
|
||||||
|
|
||||||
|
process_justification(
|
||||||
|
state,
|
||||||
|
current_total_balance,
|
||||||
|
previous_total_balance,
|
||||||
|
attesters.previous_epoch_boundary.balance,
|
||||||
|
attesters.current_epoch_boundary.balance,
|
||||||
|
spec,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Crosslinks
|
||||||
|
let winning_root_for_shards = process_crosslinks(state, spec)?;
|
||||||
|
|
||||||
|
// Rewards and Penalties
|
||||||
|
let active_validator_indices_hashset: HashSet<usize> =
|
||||||
|
HashSet::from_iter(active_validator_indices.iter().cloned());
|
||||||
|
process_rewards_and_penalties(
|
||||||
|
state,
|
||||||
|
active_validator_indices_hashset,
|
||||||
|
&attesters,
|
||||||
|
previous_total_balance,
|
||||||
|
&winning_root_for_shards,
|
||||||
|
spec,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Ejections
|
||||||
|
state.process_ejections(spec);
|
||||||
|
|
||||||
|
// Validator Registry
|
||||||
|
process_validator_registry(state, spec)?;
|
||||||
|
|
||||||
|
// Final updates
|
||||||
|
let active_tree_root = get_active_validator_indices(
|
||||||
|
&state.validator_registry,
|
||||||
|
next_epoch + Epoch::from(spec.activation_exit_delay),
|
||||||
|
)
|
||||||
|
.hash_tree_root();
|
||||||
|
state.latest_active_index_roots[(next_epoch.as_usize()
|
||||||
|
+ spec.activation_exit_delay as usize)
|
||||||
|
% spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]);
|
||||||
|
|
||||||
|
state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] =
|
||||||
|
state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length];
|
||||||
|
state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state
|
||||||
|
.get_randao_mix(current_epoch, spec)
|
||||||
|
.map(|x| *x)
|
||||||
|
.ok_or_else(|| Error::NoRandaoSeed)?;
|
||||||
|
state.latest_attestations = state
|
||||||
|
.latest_attestations
|
||||||
|
.iter()
|
||||||
|
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch)
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Rotate the epoch caches to suit the epoch transition.
|
||||||
|
state.advance_caches();
|
||||||
|
|
||||||
|
debug!("Epoch transition complete.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
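For context, callers are expected to run this transition once per epoch boundary. A hypothetical call site (everything other than `per_epoch_processing` itself is assumed for illustration):

// Hypothetical driver: run the epoch transition after the final slot of an epoch.
fn maybe_process_epoch(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
    if (state.slot.as_u64() + 1) % spec.slots_per_epoch == 0 {
        per_epoch_processing(state, spec)?;
    }
    Ok(())
}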
|
||||||
|
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) {
|
||||||
|
let next_epoch = state.next_epoch(spec);
|
||||||
|
let voting_period = spec.epochs_per_eth1_voting_period;
|
||||||
|
|
||||||
|
if next_epoch % voting_period == 0 {
|
||||||
|
for eth1_data_vote in &state.eth1_data_votes {
|
||||||
|
if eth1_data_vote.vote_count * 2 > voting_period {
|
||||||
|
state.latest_eth1_data = eth1_data_vote.eth1_data.clone();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
state.eth1_data_votes = vec![];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn process_justification(
|
||||||
|
state: &mut BeaconState,
|
||||||
|
current_total_balance: u64,
|
||||||
|
previous_total_balance: u64,
|
||||||
|
previous_epoch_boundary_attesting_balance: u64,
|
||||||
|
current_epoch_boundary_attesting_balance: u64,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) {
|
||||||
|
let previous_epoch = state.previous_epoch(spec);
|
||||||
|
let current_epoch = state.current_epoch(spec);
|
||||||
|
|
||||||
|
let mut new_justified_epoch = state.justified_epoch;
|
||||||
|
state.justification_bitfield <<= 1;
|
||||||
|
|
||||||
|
// If > 2/3 of the total balance attested to the previous epoch boundary
|
||||||
|
//
|
||||||
|
// - Set the 2nd bit of the bitfield.
|
||||||
|
// - Set the previous epoch to be justified.
|
||||||
|
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) {
|
||||||
|
state.justification_bitfield |= 2;
|
||||||
|
new_justified_epoch = previous_epoch;
|
||||||
|
}
|
||||||
|
// If > 2/3 of the total balance attested to the current epoch boundary
|
||||||
|
//
|
||||||
|
// - Set the 1st bit of the bitfield.
|
||||||
|
// - Set the current epoch to be justified.
|
||||||
|
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
|
||||||
|
state.justification_bitfield |= 1;
|
||||||
|
new_justified_epoch = current_epoch;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If:
|
||||||
|
//
|
||||||
|
// - All three epochs prior to this epoch have been justified.
|
||||||
|
// - The previously justified epoch was three epochs ago.
|
||||||
|
//
|
||||||
|
// Then, set the finalized epoch to be three epochs ago.
|
||||||
|
if ((state.justification_bitfield >> 1) % 8 == 0b111)
|
||||||
|
& (state.previous_justified_epoch == previous_epoch - 2)
|
||||||
|
{
|
||||||
|
state.finalized_epoch = state.previous_justified_epoch;
|
||||||
|
}
|
||||||
|
// If:
|
||||||
|
//
|
||||||
|
// - Both of the two epochs prior to this epoch have been justified.
|
||||||
|
// - The previous justified epoch was two epochs ago.
|
||||||
|
//
|
||||||
|
// Then, set the finalized epoch to two epochs ago.
|
||||||
|
if ((state.justification_bitfield >> 1) % 4 == 0b11)
|
||||||
|
& (state.previous_justified_epoch == previous_epoch - 1)
|
||||||
|
{
|
||||||
|
state.finalized_epoch = state.previous_justified_epoch;
|
||||||
|
}
|
||||||
|
// If:
|
||||||
|
//
|
||||||
|
// - This epoch and the two prior have been justified.
|
||||||
|
// - The presently justified epoch was two epochs ago.
|
||||||
|
//
|
||||||
|
// Then, set the finalized epoch to two epochs ago.
|
||||||
|
if (state.justification_bitfield % 8 == 0b111) & (state.justified_epoch == previous_epoch - 1) {
|
||||||
|
state.finalized_epoch = state.justified_epoch;
|
||||||
|
}
|
||||||
|
// If:
|
||||||
|
//
|
||||||
|
// - This epoch and the epoch prior to it have been justified.
|
||||||
|
// - The presently justified epoch is the previous epoch.
|
||||||
|
//
|
||||||
|
// Then, set the finalized epoch to be the previous epoch.
|
||||||
|
if (state.justification_bitfield % 4 == 0b11) & (state.justified_epoch == previous_epoch) {
|
||||||
|
state.finalized_epoch = state.justified_epoch;
|
||||||
|
}
|
||||||
|
|
||||||
|
state.previous_justified_epoch = state.justified_epoch;
|
||||||
|
state.justified_epoch = new_justified_epoch;
|
||||||
|
}
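To make the bit arithmetic above concrete, here is a small illustrative test; the constants are invented for the example and are not part of the diff.

#[test]
fn justification_bitfield_example() {
    // Suppose epochs e-1, e-2 and e-3 were all justified: after this epoch's shift the
    // bitfield ends in 0b1110, so the "three prior epochs justified" check fires.
    let justification_bitfield: u64 = 0b1110;
    assert_eq!((justification_bitfield >> 1) % 8, 0b111);
    // The weaker "two prior epochs justified" check also holds for this pattern.
    assert_eq!((justification_bitfield >> 1) % 4, 0b11);
}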
|
||||||
|
|
||||||
|
pub type WinningRootHashSet = HashMap<u64, WinningRoot>;
|
||||||
|
|
||||||
|
fn process_crosslinks(
|
||||||
|
state: &mut BeaconState,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<WinningRootHashSet, Error> {
|
||||||
|
let current_epoch_attestations: Vec<&PendingAttestation> = state
|
||||||
|
.latest_attestations
|
||||||
|
.par_iter()
|
||||||
|
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.current_epoch(spec))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let previous_epoch_attestations: Vec<&PendingAttestation> = state
|
||||||
|
.latest_attestations
|
||||||
|
.par_iter()
|
||||||
|
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
|
||||||
|
|
||||||
|
let previous_and_current_epoch_slots: Vec<Slot> = state
|
||||||
|
.previous_epoch(spec)
|
||||||
|
.slot_iter(spec.slots_per_epoch)
|
||||||
|
.chain(state.current_epoch(spec).slot_iter(spec.slots_per_epoch))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for slot in previous_and_current_epoch_slots {
|
||||||
|
// Cloning removes the borrow, which becomes an issue when mutating `state` inside the loop.
|
||||||
|
let crosslink_committees_at_slot =
|
||||||
|
state.get_crosslink_committees_at_slot(slot, spec)?.clone();
|
||||||
|
|
||||||
|
for (crosslink_committee, shard) in crosslink_committees_at_slot {
|
||||||
|
let shard = shard as u64;
|
||||||
|
|
||||||
|
let winning_root = winning_root(
|
||||||
|
state,
|
||||||
|
shard,
|
||||||
|
¤t_epoch_attestations[..],
|
||||||
|
&previous_epoch_attestations[..],
|
||||||
|
spec,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
if let Some(winning_root) = winning_root {
|
||||||
|
let total_committee_balance = state.get_total_balance(&crosslink_committee, spec);
|
||||||
|
|
||||||
|
// TODO: I think this has a bug.
|
||||||
|
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
|
||||||
|
state.latest_crosslinks[shard as usize] = Crosslink {
|
||||||
|
epoch: state.current_epoch(spec),
|
||||||
|
crosslink_data_root: winning_root.crosslink_data_root,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
winning_root_for_shards.insert(shard, winning_root);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(winning_root_for_shards)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn process_rewards_and_penalties(
|
||||||
|
state: &mut BeaconState,
|
||||||
|
active_validator_indices: HashSet<usize>,
|
||||||
|
attesters: &AttesterSets,
|
||||||
|
previous_total_balance: u64,
|
||||||
|
winning_root_for_shards: &WinningRootHashSet,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let next_epoch = state.next_epoch(spec);
|
||||||
|
|
||||||
|
let previous_epoch_attestations: Vec<&PendingAttestation> = state
|
||||||
|
.latest_attestations
|
||||||
|
.par_iter()
|
||||||
|
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
|
||||||
|
|
||||||
|
if base_reward_quotient == 0 {
|
||||||
|
return Err(Error::BaseRewardQuotientIsZero);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Justification and finalization
|
||||||
|
|
||||||
|
let epochs_since_finality = next_epoch - state.finalized_epoch;
|
||||||
|
|
||||||
|
if epochs_since_finality <= 4 {
|
||||||
|
for index in 0..state.validator_balances.len() {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
|
||||||
|
// Expected FFG source
|
||||||
|
if attesters.previous_epoch.indices.contains(&index) {
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward * attesters.previous_epoch.balance / previous_total_balance
|
||||||
|
);
|
||||||
|
} else if active_validator_indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], base_reward);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expected FFG target
|
||||||
|
if attesters.previous_epoch_boundary.indices.contains(&index) {
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward * attesters.previous_epoch_boundary.balance
|
||||||
|
/ previous_total_balance
|
||||||
|
);
|
||||||
|
} else if active_validator_indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], base_reward);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expected beacon chain head
|
||||||
|
if attesters.previous_epoch_head.indices.contains(&index) {
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward * attesters.previous_epoch_head.balance / previous_total_balance
|
||||||
|
);
|
||||||
|
} else if active_validator_indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], base_reward);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inclusion distance
|
||||||
|
for &index in &attesters.previous_epoch.indices {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
let inclusion_distance =
|
||||||
|
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
|
||||||
|
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
|
||||||
|
)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for index in 0..state.validator_balances.len() {
|
||||||
|
let inactivity_penalty =
|
||||||
|
state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec);
|
||||||
|
|
||||||
|
if active_validator_indices.contains(&index) {
|
||||||
|
if !attesters.previous_epoch.indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
|
||||||
|
}
|
||||||
|
if !attesters.previous_epoch_boundary.indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
|
||||||
|
}
|
||||||
|
if !attesters.previous_epoch_head.indices.contains(&index) {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
|
||||||
|
}
|
||||||
|
|
||||||
|
if state.validator_registry[index].slashed {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
safe_sub_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
2 * inactivity_penalty + base_reward
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for &index in &attesters.previous_epoch.indices {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
let inclusion_distance =
|
||||||
|
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
|
||||||
|
|
||||||
|
safe_sub_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward
|
||||||
|
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attestation inclusion
|
||||||
|
|
||||||
|
for &index in &attesters.previous_epoch.indices {
|
||||||
|
let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?;
|
||||||
|
|
||||||
|
let proposer_index = state
|
||||||
|
.get_beacon_proposer_index(inclusion_slot, spec)
|
||||||
|
.map_err(|_| Error::UnableToDetermineProducer)?;
|
||||||
|
|
||||||
|
let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec);
|
||||||
|
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[proposer_index],
|
||||||
|
base_reward / spec.attestation_inclusion_reward_quotient
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Crosslinks
|
||||||
|
|
||||||
|
for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) {
|
||||||
|
// Cloning removes the borrow, which becomes an issue when mutating `state.validator_balances`.
|
||||||
|
let crosslink_committees_at_slot =
|
||||||
|
state.get_crosslink_committees_at_slot(slot, spec)?.clone();
|
||||||
|
|
||||||
|
for (crosslink_committee, shard) in crosslink_committees_at_slot {
|
||||||
|
let shard = shard as u64;
|
||||||
|
|
||||||
|
// Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to
|
||||||
|
// clear it up.
|
||||||
|
//
|
||||||
|
// What happens here is:
|
||||||
|
//
|
||||||
|
// - If there was some crosslink root elected by the super-majority of this committee,
|
||||||
|
// then we reward all who voted for that root and penalize all that did not.
|
||||||
|
// - However, if there _was not_ some super-majority-voted crosslink root, then penalize
|
||||||
|
// all the validators.
|
||||||
|
//
|
||||||
|
// I'm not quite sure that the second case (no super-majority crosslink) is correct.
|
||||||
|
if let Some(winning_root) = winning_root_for_shards.get(&shard) {
|
||||||
|
// The hash set de-duplicates and (hopefully) offers a speed improvement from faster
|
||||||
|
// lookups.
|
||||||
|
let attesting_validator_indices: HashSet<usize> =
|
||||||
|
HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned());
|
||||||
|
|
||||||
|
for &index in &crosslink_committee {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
|
||||||
|
let total_balance = state.get_total_balance(&crosslink_committee, spec);
|
||||||
|
|
||||||
|
if attesting_validator_indices.contains(&index) {
|
||||||
|
safe_add_assign!(
|
||||||
|
state.validator_balances[index],
|
||||||
|
base_reward * winning_root.total_attesting_balance / total_balance
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
safe_sub_assign!(state.validator_balances[index], base_reward);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for &index in &crosslink_committee {
|
||||||
|
let base_reward = state.base_reward(index, base_reward_quotient, spec);
|
||||||
|
|
||||||
|
safe_sub_assign!(state.validator_balances[index], base_reward);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spec v0.4.0
|
||||||
|
fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
||||||
|
let current_epoch = state.current_epoch(spec);
|
||||||
|
let next_epoch = state.next_epoch(spec);
|
||||||
|
|
||||||
|
state.previous_shuffling_epoch = state.current_shuffling_epoch;
|
||||||
|
state.previous_shuffling_start_shard = state.current_shuffling_start_shard;
|
||||||
|
|
||||||
|
state.previous_shuffling_seed = state.current_shuffling_seed;
|
||||||
|
|
||||||
|
let should_update_validator_registry = if state.finalized_epoch
|
||||||
|
> state.validator_registry_update_epoch
|
||||||
|
{
|
||||||
|
(0..state.get_current_epoch_committee_count(spec)).all(|i| {
|
||||||
|
let shard = (state.current_shuffling_start_shard + i as u64) % spec.shard_count;
|
||||||
|
state.latest_crosslinks[shard as usize].epoch > state.validator_registry_update_epoch
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
};
|
||||||
|
|
||||||
|
if should_update_validator_registy {
|
||||||
|
state.update_validator_registry(spec);
|
||||||
|
|
||||||
|
state.current_shuffling_epoch = next_epoch;
|
||||||
|
state.current_shuffling_start_shard = (state.current_shuffling_start_shard
|
||||||
|
+ state.get_current_epoch_committee_count(spec) as u64)
|
||||||
|
% spec.shard_count;
|
||||||
|
state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?
|
||||||
|
} else {
|
||||||
|
let epochs_since_last_registry_update =
|
||||||
|
current_epoch - state.validator_registry_update_epoch;
|
||||||
|
if (epochs_since_last_registry_update > 1)
|
||||||
|
& epochs_since_last_registry_update.is_power_of_two()
|
||||||
|
{
|
||||||
|
state.current_shuffling_epoch = next_epoch;
|
||||||
|
state.current_shuffling_seed =
|
||||||
|
state.generate_seed(state.current_shuffling_epoch, spec)?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
state.process_slashings(spec);
|
||||||
|
state.process_exit_queue(spec);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
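The reward rule described in the comment above can be made concrete with a small standalone sketch. The numbers are invented for illustration; only the arithmetic mirrors the code.

// Illustrative only: mirrors `base_reward * winning_root.total_attesting_balance / total_balance`
// with made-up numbers, to contrast the pro-rata reward with the flat penalty.
fn main() {
    let base_reward: u64 = 100;            // hypothetical per-validator base reward (Gwei)
    let total_attesting_balance: u64 = 96;  // balance that voted for the winning root
    let total_balance: u64 = 128;           // balance of the whole committee

    // Attesters to the winning root are rewarded pro-rata...
    let reward = base_reward * total_attesting_balance / total_balance;
    assert_eq!(reward, 75);

    // ...while non-attesters (or everyone, if no super-majority root exists) lose the full base reward.
    let penalty = base_reward;
    assert_eq!(penalty, 100);
}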
@@ -0,0 +1,98 @@
use std::collections::HashSet;
use types::*;

#[derive(Default)]
pub struct Attesters {
    pub indices: HashSet<usize>,
    pub balance: u64,
}

impl Attesters {
    fn add(&mut self, additional_indices: &[usize], additional_balance: u64) {
        self.indices.reserve(additional_indices.len());
        for i in additional_indices {
            self.indices.insert(*i);
        }
        // Accumulate the balance (the result of `saturating_add` must be assigned back).
        self.balance = self.balance.saturating_add(additional_balance);
    }
}

pub struct AttesterSets {
    pub current_epoch: Attesters,
    pub current_epoch_boundary: Attesters,
    pub previous_epoch: Attesters,
    pub previous_epoch_boundary: Attesters,
    pub previous_epoch_head: Attesters,
}

impl AttesterSets {
    pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
        let mut current_epoch = Attesters::default();
        let mut current_epoch_boundary = Attesters::default();
        let mut previous_epoch = Attesters::default();
        let mut previous_epoch_boundary = Attesters::default();
        let mut previous_epoch_head = Attesters::default();

        for a in &state.latest_attestations {
            let attesting_indices =
                state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
            let attesting_balance = state.get_total_balance(&attesting_indices, spec);

            if is_from_epoch(a, state.current_epoch(spec), spec) {
                current_epoch.add(&attesting_indices, attesting_balance);

                if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
                    current_epoch_boundary.add(&attesting_indices, attesting_balance);
                }
            } else if is_from_epoch(a, state.previous_epoch(spec), spec) {
                previous_epoch.add(&attesting_indices, attesting_balance);

                if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
                    previous_epoch_boundary.add(&attesting_indices, attesting_balance);
                }

                if has_common_beacon_block_root(a, state, spec)? {
                    previous_epoch_head.add(&attesting_indices, attesting_balance);
                }
            }
        }

        Ok(Self {
            current_epoch,
            current_epoch_boundary,
            previous_epoch,
            previous_epoch_boundary,
            previous_epoch_head,
        })
    }
}

fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool {
    a.data.slot.epoch(spec.slots_per_epoch) == epoch
}

fn has_common_epoch_boundary_root(
    a: &PendingAttestation,
    state: &BeaconState,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    let slot = epoch.start_slot(spec.slots_per_epoch);
    let state_boundary_root = *state
        .get_block_root(slot, spec)
        .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;

    Ok(a.data.epoch_boundary_root == state_boundary_root)
}

fn has_common_beacon_block_root(
    a: &PendingAttestation,
    state: &BeaconState,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    let state_block_root = *state
        .get_block_root(a.data.slot, spec)
        .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?;

    Ok(a.data.beacon_block_root == state_block_root)
}
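A minimal usage sketch for `AttesterSets::new`. It leans on the `BeaconStateBuilder` added later in this diff, and the `use` paths for the `state_processing` module layout are assumptions rather than something this commit guarantees.

// Sketch only: the module paths below are assumed, not confirmed by this diff.
use state_processing::per_epoch_processing::attester_sets::AttesterSets;
use types::beacon_state::BeaconStateBuilder;
use types::*;

fn main() -> Result<(), BeaconStateError> {
    let mut builder = BeaconStateBuilder::new(8);
    builder.spec = ChainSpec::few_validators();
    builder.build_fast()?;
    builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);

    let state = builder.cloned_state();

    // Partition `state.latest_attestations` into the five sets used by per-epoch processing.
    let sets = AttesterSets::new(&state, &builder.spec)?;
    println!(
        "previous-epoch attesters: {} ({} Gwei)",
        sets.previous_epoch.indices.len(),
        sets.previous_epoch.balance
    );
    Ok(())
}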
36  eth2/state_processing/src/per_epoch_processing/errors.rs  Normal file
@@ -0,0 +1,36 @@
use types::*;

#[derive(Debug, PartialEq)]
pub enum EpochProcessingError {
    UnableToDetermineProducer,
    NoBlockRoots,
    BaseRewardQuotientIsZero,
    NoRandaoSeed,
    BeaconStateError(BeaconStateError),
    InclusionError(InclusionError),
}

impl From<InclusionError> for EpochProcessingError {
    fn from(e: InclusionError) -> EpochProcessingError {
        EpochProcessingError::InclusionError(e)
    }
}

impl From<BeaconStateError> for EpochProcessingError {
    fn from(e: BeaconStateError) -> EpochProcessingError {
        EpochProcessingError::BeaconStateError(e)
    }
}

#[derive(Debug, PartialEq)]
pub enum InclusionError {
    /// The validator did not participate in an attestation in this period.
    NoAttestationsForValidator,
    BeaconStateError(BeaconStateError),
}

impl From<BeaconStateError> for InclusionError {
    fn from(e: BeaconStateError) -> InclusionError {
        InclusionError::BeaconStateError(e)
    }
}
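The `From` impls above exist so the `?` operator can lift lower-level errors into `EpochProcessingError` automatically. A tiny hedged sketch, written as if it lived in this same module (so `EpochProcessingError` is in scope):

use types::BeaconStateError;

// Any `Result<_, BeaconStateError>` can be bubbled up with `?` from a function that
// returns `Result<_, EpochProcessingError>`, thanks to the `From` impl above.
fn promote(r: Result<u64, BeaconStateError>) -> Result<u64, EpochProcessingError> {
    Ok(r?)
}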
@@ -0,0 +1,61 @@
use super::errors::InclusionError;
use types::*;

/// Returns the distance between the first included attestation for some validator and this
/// slot.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
pub fn inclusion_distance(
    state: &BeaconState,
    attestations: &[&PendingAttestation],
    validator_index: usize,
    spec: &ChainSpec,
) -> Result<u64, InclusionError> {
    let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
    Ok((attestation.inclusion_slot - attestation.data.slot).as_u64())
}

/// Returns the slot of the earliest included attestation for some validator.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
pub fn inclusion_slot(
    state: &BeaconState,
    attestations: &[&PendingAttestation],
    validator_index: usize,
    spec: &ChainSpec,
) -> Result<Slot, InclusionError> {
    let attestation = earliest_included_attestation(state, attestations, validator_index, spec)?;
    Ok(attestation.inclusion_slot)
}

/// Finds the earliest included attestation for some validator.
///
/// Note: In the spec this is defined "inline", not as a helper function.
///
/// Spec v0.4.0
fn earliest_included_attestation(
    state: &BeaconState,
    attestations: &[&PendingAttestation],
    validator_index: usize,
    spec: &ChainSpec,
) -> Result<PendingAttestation, InclusionError> {
    let mut included_attestations = vec![];

    for (i, a) in attestations.iter().enumerate() {
        let participants =
            state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
        if participants.iter().any(|i| *i == validator_index) {
            included_attestations.push(i);
        }
    }

    let earliest_attestation_index = included_attestations
        .iter()
        .min_by_key(|i| attestations[**i].inclusion_slot)
        .ok_or_else(|| InclusionError::NoAttestationsForValidator)?;
    Ok(attestations[*earliest_attestation_index].clone())
}
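The core of `earliest_included_attestation` is a `min_by_key` over inclusion slots. The same selection reduced to plain integers, with hypothetical data, as a standalone sketch:

fn main() {
    // (attestation index, inclusion_slot) pairs for one validator; values are hypothetical.
    let included: Vec<(usize, u64)> = vec![(0, 17), (1, 12), (2, 19)];

    // Pick the attestation that was included earliest, exactly as the `min_by_key` above does.
    let earliest = included.iter().min_by_key(|(_, slot)| *slot);
    assert_eq!(earliest, Some(&(1, 12)));

    // Inclusion distance is then `inclusion_slot - data.slot`.
    let data_slot = 10u64;
    assert_eq!(12 - data_slot, 2);
}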
21  eth2/state_processing/src/per_epoch_processing/tests.rs  Normal file
@@ -0,0 +1,21 @@
#![cfg(test)]
use crate::per_epoch_processing;
use env_logger::{Builder, Env};
use types::beacon_state::BeaconStateBuilder;
use types::*;

#[test]
fn runs_without_error() {
    Builder::from_env(Env::default().default_filter_or("error")).init();

    let mut builder = BeaconStateBuilder::new(8);
    builder.spec = ChainSpec::few_validators();

    builder.build().unwrap();
    builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);

    let mut state = builder.cloned_state();

    let spec = &builder.spec;
    per_epoch_processing(&mut state, spec).unwrap();
}
118  eth2/state_processing/src/per_epoch_processing/winning_root.rs  Normal file
@@ -0,0 +1,118 @@
use std::collections::HashSet;
use std::iter::FromIterator;
use types::*;

#[derive(Clone)]
pub struct WinningRoot {
    pub crosslink_data_root: Hash256,
    pub attesting_validator_indices: Vec<usize>,
    pub total_attesting_balance: u64,
}

impl WinningRoot {
    /// Returns `true` if `self` is a "better" candidate than `other`.
    ///
    /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties
    /// are broken by favouring the lower `crosslink_data_root` value.
    ///
    /// Spec v0.4.0
    pub fn is_better_than(&self, other: &Self) -> bool {
        if self.total_attesting_balance > other.total_attesting_balance {
            true
        } else if self.total_attesting_balance == other.total_attesting_balance {
            self.crosslink_data_root < other.crosslink_data_root
        } else {
            false
        }
    }
}

/// Returns the `crosslink_data_root` with the highest total attesting balance for the given shard.
/// Breaks ties by favouring the smaller `crosslink_data_root` hash.
///
/// The `WinningRoot` object also contains additional fields that are useful in later stages of
/// per-epoch processing.
///
/// Spec v0.4.0
pub fn winning_root(
    state: &BeaconState,
    shard: u64,
    current_epoch_attestations: &[&PendingAttestation],
    previous_epoch_attestations: &[&PendingAttestation],
    spec: &ChainSpec,
) -> Result<Option<WinningRoot>, BeaconStateError> {
    let mut winning_root: Option<WinningRoot> = None;

    let crosslink_data_roots: HashSet<Hash256> = HashSet::from_iter(
        previous_epoch_attestations
            .iter()
            .chain(current_epoch_attestations.iter())
            .filter_map(|a| {
                if a.data.shard == shard {
                    Some(a.data.crosslink_data_root)
                } else {
                    None
                }
            }),
    );

    for crosslink_data_root in crosslink_data_roots {
        let attesting_validator_indices = get_attesting_validator_indices(
            state,
            shard,
            current_epoch_attestations,
            previous_epoch_attestations,
            &crosslink_data_root,
            spec,
        )?;

        let total_attesting_balance: u64 = attesting_validator_indices
            .iter()
            .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));

        let candidate = WinningRoot {
            crosslink_data_root,
            attesting_validator_indices,
            total_attesting_balance,
        };

        if let Some(ref winner) = winning_root {
            if candidate.is_better_than(&winner) {
                winning_root = Some(candidate);
            }
        } else {
            winning_root = Some(candidate);
        }
    }

    Ok(winning_root)
}

/// Returns all indices which voted for a given crosslink. May contain duplicates.
///
/// Spec v0.4.0
fn get_attesting_validator_indices(
    state: &BeaconState,
    shard: u64,
    current_epoch_attestations: &[&PendingAttestation],
    previous_epoch_attestations: &[&PendingAttestation],
    crosslink_data_root: &Hash256,
    spec: &ChainSpec,
) -> Result<Vec<usize>, BeaconStateError> {
    let mut indices = vec![];

    for a in current_epoch_attestations
        .iter()
        .chain(previous_epoch_attestations.iter())
    {
        if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) {
            indices.append(&mut state.get_attestation_participants(
                &a.data,
                &a.aggregation_bitfield,
                spec,
            )?);
        }
    }

    Ok(indices)
}
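The tie-breaking rule above, restated as a standalone runnable snippet with `u64` standing in for `Hash256` so it needs no crate dependencies:

struct Candidate {
    crosslink_data_root: u64, // stand-in for Hash256
    total_attesting_balance: u64,
}

impl Candidate {
    fn is_better_than(&self, other: &Self) -> bool {
        if self.total_attesting_balance > other.total_attesting_balance {
            true
        } else if self.total_attesting_balance == other.total_attesting_balance {
            // Ties favour the lower root.
            self.crosslink_data_root < other.crosslink_data_root
        } else {
            false
        }
    }
}

fn main() {
    let a = Candidate { crosslink_data_root: 7, total_attesting_balance: 64 };
    let b = Candidate { crosslink_data_root: 3, total_attesting_balance: 64 };
    // Equal balances: the lower root (b) wins the tie.
    assert!(b.is_better_than(&a));
    assert!(!a.is_better_than(&b));
}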
58  eth2/state_processing/src/per_slot_processing.rs  Normal file
@@ -0,0 +1,58 @@
use crate::*;
use types::{BeaconState, BeaconStateError, ChainSpec, Hash256};

#[derive(Debug, PartialEq)]
pub enum Error {
    BeaconStateError(BeaconStateError),
    EpochProcessingError(EpochProcessingError),
}

/// Advances a state forward by one slot, performing per-epoch processing if required.
///
/// Spec v0.4.0
pub fn per_slot_processing(
    state: &mut BeaconState,
    previous_block_root: Hash256,
    spec: &ChainSpec,
) -> Result<(), Error> {
    if (state.slot + 1) % spec.slots_per_epoch == 0 {
        per_epoch_processing(state, spec)?;
        state.advance_caches();
    }

    state.slot += 1;

    update_block_roots(state, previous_block_root, spec);

    Ok(())
}

/// Updates the state's block roots as per-slot processing is performed.
///
/// Spec v0.4.0
pub fn update_block_roots(state: &mut BeaconState, previous_block_root: Hash256, spec: &ChainSpec) {
    state.latest_block_roots[(state.slot.as_usize() - 1) % spec.latest_block_roots_length] =
        previous_block_root;

    if state.slot.as_usize() % spec.latest_block_roots_length == 0 {
        let root = merkle_root(&state.latest_block_roots[..]);
        state.batched_block_roots.push(root);
    }
}

fn merkle_root(_input: &[Hash256]) -> Hash256 {
    // TODO: implement correctly.
    Hash256::zero()
}

impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Error {
        Error::BeaconStateError(e)
    }
}

impl From<EpochProcessingError> for Error {
    fn from(e: EpochProcessingError) -> Error {
        Error::EpochProcessingError(e)
    }
}
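A hedged usage sketch for the free function above: advancing a fresh state one epoch's worth of slots. The crate names and builder usage mirror other files in this diff but are not guaranteed by it, and the zero previous block root is a placeholder for the real parent block root.

use state_processing::per_slot_processing; // crate/module path assumed
use types::beacon_state::BeaconStateBuilder;
use types::*;

fn main() {
    let mut builder = BeaconStateBuilder::new(8);
    builder.spec = ChainSpec::few_validators();
    builder.build().unwrap();

    let mut state = builder.cloned_state();
    let spec = &builder.spec;

    // Placeholder: a real caller passes the root of the block at the previous slot.
    let previous_block_root = Hash256::zero();

    for _ in 0..spec.slots_per_epoch {
        per_slot_processing(&mut state, previous_block_root, spec).unwrap();
    }
}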
@@ -1,70 +0,0 @@
use crate::{EpochProcessable, EpochProcessingError};
use types::{beacon_state::BeaconStateError, BeaconState, ChainSpec, Hash256};

#[derive(Debug, PartialEq)]
pub enum Error {
    BeaconStateError(BeaconStateError),
    EpochProcessingError(EpochProcessingError),
}

pub trait SlotProcessable {
    fn per_slot_processing(
        &mut self,
        previous_block_root: Hash256,
        spec: &ChainSpec,
    ) -> Result<(), Error>;
}

impl SlotProcessable for BeaconState
where
    BeaconState: EpochProcessable,
{
    fn per_slot_processing(
        &mut self,
        previous_block_root: Hash256,
        spec: &ChainSpec,
    ) -> Result<(), Error> {
        if (self.slot + 1) % spec.epoch_length == 0 {
            self.per_epoch_processing(spec)?;
        }

        self.slot += 1;

        self.latest_randao_mixes[self.slot.as_usize() % spec.latest_randao_mixes_length] =
            self.latest_randao_mixes[(self.slot.as_usize() - 1) % spec.latest_randao_mixes_length];

        // Block roots.
        self.latest_block_roots[(self.slot.as_usize() - 1) % spec.latest_block_roots_length] =
            previous_block_root;

        if self.slot.as_usize() % spec.latest_block_roots_length == 0 {
            let root = merkle_root(&self.latest_block_roots[..]);
            self.batched_block_roots.push(root);
        }
        Ok(())
    }
}

fn merkle_root(_input: &[Hash256]) -> Hash256 {
    Hash256::zero()
}

impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Error {
        Error::BeaconStateError(e)
    }
}

impl From<EpochProcessingError> for Error {
    fn from(e: EpochProcessingError) -> Error {
        Error::EpochProcessingError(e)
    }
}

#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
@@ -7,9 +7,10 @@ edition = "2018"
 [dependencies]
 bls = { path = "../utils/bls" }
 boolean-bitfield = { path = "../utils/boolean-bitfield" }
-ethereum-types = "0.4.0"
+ethereum-types = "0.5"
 hashing = { path = "../utils/hashing" }
 honey-badger-split = { path = "../utils/honey-badger-split" }
+int_to_bytes = { path = "../utils/int_to_bytes" }
 log = "0.4"
 rayon = "1.0"
 rand = "0.5.5"
@@ -20,6 +21,7 @@ slog = "^2.2.3"
 ssz = { path = "../utils/ssz" }
 ssz_derive = { path = "../utils/ssz_derive" }
 swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" }
+test_random_derive = { path = "../utils/test_random_derive" }

 [dev-dependencies]
 env_logger = "0.6.0"
@@ -1,11 +1,15 @@
-use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, Hash256};
+use super::{AggregateSignature, AttestationData, Bitfield};
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
+use ssz::TreeHash;
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)]
+/// Details an attestation that can be slashable.
+///
+/// Spec v0.4.0
+#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
 pub struct Attestation {
     pub aggregation_bitfield: Bitfield,
     pub data: AttestationData,
@@ -13,54 +17,11 @@ pub struct Attestation {
     pub aggregate_signature: AggregateSignature,
 }

-impl Attestation {
-    pub fn canonical_root(&self) -> Hash256 {
-        Hash256::from(&self.hash_tree_root()[..])
-    }
-
-    pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
-        self.data.signable_message(custody_bit)
-    }
-
-    pub fn verify_signature(
-        &self,
-        group_public_key: &AggregatePublicKey,
-        custody_bit: bool,
-        // TODO: use domain.
-        _domain: u64,
-    ) -> bool {
-        self.aggregate_signature
-            .verify(&self.signable_message(custody_bit), group_public_key)
-    }
-}
-
-impl TreeHash for Attestation {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.aggregation_bitfield.hash_tree_root_internal());
-        result.append(&mut self.data.hash_tree_root_internal());
-        result.append(&mut self.custody_bitfield.hash_tree_root_internal());
-        result.append(&mut self.aggregate_signature.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Attestation {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            data: <_>::random_for_test(rng),
-            aggregation_bitfield: <_>::random_for_test(rng),
-            custody_bitfield: <_>::random_for_test(rng),
-            aggregate_signature: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
@@ -1,28 +1,33 @@
 use crate::test_utils::TestRandom;
-use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot};
+use crate::{Crosslink, Epoch, Hash256, Slot};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
+use ssz::TreeHash;
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
+use test_random_derive::TestRandom;

-pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
-    8 + // slot
-    8 + // shard
-    32 + // beacon_block_hash
-    32 + // epoch_boundary_root
-    32 + // shard_block_hash
-    32 + // latest_crosslink_hash
-    8 + // justified_epoch
-    32 // justified_block_root
-};
-
-#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)]
+/// The data upon which an attestation is based.
+///
+/// Spec v0.4.0
+#[derive(
+    Debug,
+    Clone,
+    PartialEq,
+    Default,
+    Serialize,
+    Hash,
+    Encode,
+    Decode,
+    TreeHash,
+    TestRandom,
+    SignedRoot,
+)]
 pub struct AttestationData {
     pub slot: Slot,
     pub shard: u64,
     pub beacon_block_root: Hash256,
     pub epoch_boundary_root: Hash256,
-    pub shard_block_root: Hash256,
+    pub crosslink_data_root: Hash256,
     pub latest_crosslink: Crosslink,
     pub justified_epoch: Epoch,
     pub justified_block_root: Hash256,
@@ -30,55 +35,11 @@ pub struct AttestationData {

 impl Eq for AttestationData {}

-impl AttestationData {
-    pub fn canonical_root(&self) -> Hash256 {
-        Hash256::from(&self.hash_tree_root()[..])
-    }
-
-    pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
-        let attestation_data_and_custody_bit = AttestationDataAndCustodyBit {
-            data: self.clone(),
-            custody_bit,
-        };
-        attestation_data_and_custody_bit.hash_tree_root()
-    }
-}
-
-impl TreeHash for AttestationData {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.slot.hash_tree_root_internal());
-        result.append(&mut self.shard.hash_tree_root_internal());
-        result.append(&mut self.beacon_block_root.hash_tree_root_internal());
-        result.append(&mut self.epoch_boundary_root.hash_tree_root_internal());
-        result.append(&mut self.shard_block_root.hash_tree_root_internal());
-        result.append(&mut self.latest_crosslink.hash_tree_root_internal());
-        result.append(&mut self.justified_epoch.hash_tree_root_internal());
-        result.append(&mut self.justified_block_root.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for AttestationData {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            slot: <_>::random_for_test(rng),
-            shard: <_>::random_for_test(rng),
-            beacon_block_root: <_>::random_for_test(rng),
-            epoch_boundary_root: <_>::random_for_test(rng),
-            shard_block_root: <_>::random_for_test(rng),
-            latest_crosslink: <_>::random_for_test(rng),
-            justified_epoch: <_>::random_for_test(rng),
-            justified_block_root: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
@@ -2,31 +2,22 @@ use super::AttestationData;
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::TreeHash;
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};

-#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)]
+/// Used for pairing an attestation with a proof-of-custody.
+///
+/// Spec v0.4.0
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)]
 pub struct AttestationDataAndCustodyBit {
     pub data: AttestationData,
     pub custody_bit: bool,
 }

-impl TreeHash for AttestationDataAndCustodyBit {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.data.hash_tree_root_internal());
-        // TODO: add bool ssz
-        // result.append(custody_bit.hash_tree_root_internal());
-        ssz::hash(&result)
-    }
-}
-
 impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
     fn random_for_test(rng: &mut T) -> Self {
         Self {
             data: <_>::random_for_test(rng),
-            // TODO: deal with bools
-            custody_bit: false,
+            custody_bit: <_>::random_for_test(rng),
         }
     }
 }
@@ -35,7 +26,7 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
 mod test {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
@@ -1,38 +1,27 @@
 use crate::{test_utils::TestRandom, SlashableAttestation};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+mod builder;
+
+pub use builder::AttesterSlashingBuilder;
+
+/// Two conflicting attestations.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct AttesterSlashing {
     pub slashable_attestation_1: SlashableAttestation,
     pub slashable_attestation_2: SlashableAttestation,
 }

-impl TreeHash for AttesterSlashing {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.slashable_attestation_1.hash_tree_root_internal());
-        result.append(&mut self.slashable_attestation_2.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for AttesterSlashing {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            slashable_attestation_1: <_>::random_for_test(rng),
-            slashable_attestation_2: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
87  eth2/types/src/attester_slashing/builder.rs  Normal file
@@ -0,0 +1,87 @@
use crate::*;
use ssz::TreeHash;

/// Builds an `AttesterSlashing`.
pub struct AttesterSlashingBuilder();

impl AttesterSlashingBuilder {
    /// Builds an `AttesterSlashing` that is a double vote.
    ///
    /// The `signer` function is used to sign the double-vote and accepts:
    ///
    /// - `validator_index: u64`
    /// - `message: &[u8]`
    /// - `epoch: Epoch`
    /// - `domain: Domain`
    ///
    /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`).
    pub fn double_vote<F>(validator_indices: &[u64], signer: F) -> AttesterSlashing
    where
        F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
    {
        let double_voted_slot = Slot::new(0);
        let shard = 0;
        let justified_epoch = Epoch::new(0);
        let epoch = Epoch::new(0);
        let hash_1 = Hash256::from_low_u64_le(1);
        let hash_2 = Hash256::from_low_u64_le(2);

        let mut slashable_attestation_1 = SlashableAttestation {
            validator_indices: validator_indices.to_vec(),
            data: AttestationData {
                slot: double_voted_slot,
                shard,
                beacon_block_root: hash_1,
                epoch_boundary_root: hash_1,
                crosslink_data_root: hash_1,
                latest_crosslink: Crosslink {
                    epoch,
                    crosslink_data_root: hash_1,
                },
                justified_epoch,
                justified_block_root: hash_1,
            },
            custody_bitfield: Bitfield::new(),
            aggregate_signature: AggregateSignature::new(),
        };

        let mut slashable_attestation_2 = SlashableAttestation {
            validator_indices: validator_indices.to_vec(),
            data: AttestationData {
                slot: double_voted_slot,
                shard,
                beacon_block_root: hash_2,
                epoch_boundary_root: hash_2,
                crosslink_data_root: hash_2,
                latest_crosslink: Crosslink {
                    epoch,
                    crosslink_data_root: hash_2,
                },
                justified_epoch,
                justified_block_root: hash_2,
            },
            custody_bitfield: Bitfield::new(),
            aggregate_signature: AggregateSignature::new(),
        };

        let add_signatures = |attestation: &mut SlashableAttestation| {
            for (i, validator_index) in validator_indices.iter().enumerate() {
                let attestation_data_and_custody_bit = AttestationDataAndCustodyBit {
                    data: attestation.data.clone(),
                    custody_bit: attestation.custody_bitfield.get(i).unwrap(),
                };
                let message = attestation_data_and_custody_bit.hash_tree_root();
                let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation);
                attestation.aggregate_signature.add(&signature);
            }
        };

        add_signatures(&mut slashable_attestation_1);
        add_signatures(&mut slashable_attestation_2);

        AttesterSlashing {
            slashable_attestation_1,
            slashable_attestation_2,
        }
    }
}
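A structural usage sketch for the builder above. The signer closure here just returns the spec's empty signature, so the resulting `AttesterSlashing` is well-formed but not cryptographically valid; a real caller would sign `message` with the validator's secret key for the given epoch and domain.

use types::*;

fn main() {
    let spec = ChainSpec::foundation();
    let validator_indices = vec![0, 1, 2];

    // Placeholder signer: skips real BLS signing entirely.
    let slashing =
        AttesterSlashingBuilder::double_vote(&validator_indices, |_index, _message, _epoch, _domain| {
            spec.empty_signature.clone()
        });

    // Same slot, different roots: the defining shape of a double vote.
    assert_eq!(
        slashing.slashable_attestation_1.data.slot,
        slashing.slashable_attestation_2.data.slot
    );
    assert_ne!(
        slashing.slashable_attestation_1.data.beacon_block_root,
        slashing.slashable_attestation_2.data.beacon_block_root
    );
}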
@@ -1,20 +1,24 @@
 use crate::test_utils::TestRandom;
-use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData, Slot};
+use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot};
 use bls::Signature;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
+use ssz::TreeHash;
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+/// A block of the `BeaconChain`.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
 pub struct BeaconBlock {
     pub slot: Slot,
     pub parent_root: Hash256,
     pub state_root: Hash256,
     pub randao_reveal: Signature,
     pub eth1_data: Eth1Data,
-    pub signature: Signature,
     pub body: BeaconBlockBody,
+    pub signature: Signature,
 }

 impl BeaconBlock {
@@ -35,56 +39,15 @@ impl BeaconBlock {
                 attester_slashings: vec![],
                 attestations: vec![],
                 deposits: vec![],
-                exits: vec![],
+                voluntary_exits: vec![],
+                transfers: vec![],
             },
         }
     }

+    /// Returns the `hash_tree_root` of the block.
     pub fn canonical_root(&self) -> Hash256 {
-        Hash256::from(&self.hash_tree_root()[..])
+        Hash256::from_slice(&self.hash_tree_root()[..])
-    }
-
-    pub fn proposal_root(&self, spec: &ChainSpec) -> Hash256 {
-        let block_without_signature_root = {
-            let mut block_without_signature = self.clone();
-            block_without_signature.signature = spec.empty_signature.clone();
-            block_without_signature.canonical_root()
-        };
-
-        let proposal = ProposalSignedData {
-            slot: self.slot,
-            shard: spec.beacon_chain_shard_number,
-            block_root: block_without_signature_root,
-        };
-        Hash256::from(&proposal.hash_tree_root()[..])
-    }
-}
-
-impl TreeHash for BeaconBlock {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.slot.hash_tree_root_internal());
-        result.append(&mut self.parent_root.hash_tree_root_internal());
-        result.append(&mut self.state_root.hash_tree_root_internal());
-        result.append(&mut self.randao_reveal.hash_tree_root_internal());
-        result.append(&mut self.eth1_data.hash_tree_root_internal());
-        result.append(&mut self.signature.hash_tree_root_internal());
-        result.append(&mut self.body.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for BeaconBlock {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            slot: <_>::random_for_test(rng),
-            parent_root: <_>::random_for_test(rng),
-            state_root: <_>::random_for_test(rng),
-            randao_reveal: <_>::random_for_test(rng),
-            eth1_data: <_>::random_for_test(rng),
-            signature: <_>::random_for_test(rng),
-            body: <_>::random_for_test(rng),
-        }
     }
 }

@@ -92,7 +55,7 @@ impl<T: RngCore> TestRandom<T> for BeaconBlock {
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
@@ -1,48 +1,28 @@
-use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing};
+use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit};
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
+/// The body of a `BeaconChain` block, containing operations.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct BeaconBlockBody {
     pub proposer_slashings: Vec<ProposerSlashing>,
     pub attester_slashings: Vec<AttesterSlashing>,
     pub attestations: Vec<Attestation>,
     pub deposits: Vec<Deposit>,
-    pub exits: Vec<Exit>,
+    pub voluntary_exits: Vec<VoluntaryExit>,
+    pub transfers: Vec<Transfer>,
-}
-
-impl TreeHash for BeaconBlockBody {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.proposer_slashings.hash_tree_root_internal());
-        result.append(&mut self.attester_slashings.hash_tree_root_internal());
-        result.append(&mut self.attestations.hash_tree_root_internal());
-        result.append(&mut self.deposits.hash_tree_root_internal());
-        result.append(&mut self.exits.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for BeaconBlockBody {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            proposer_slashings: <_>::random_for_test(rng),
-            attester_slashings: <_>::random_for_test(rng),
-            attestations: <_>::random_for_test(rng),
-            deposits: <_>::random_for_test(rng),
-            exits: <_>::random_for_test(rng),
-        }
-    }
 }

 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
File diff suppressed because it is too large.
263  eth2/types/src/beacon_state/builder.rs  Normal file
@@ -0,0 +1,263 @@
use crate::*;
use bls::create_proof_of_possession;

/// Builds a `BeaconState` for use in testing or benchmarking.
///
/// Building the `BeaconState` is a four-step process:
///
/// 1. Create a new `BeaconStateBuilder`.
/// 2. Call `Self::build()` or `Self::build_fast()` to generate a `BeaconState`.
/// 3. (Optional) Use builder functions to modify the `BeaconState`.
/// 4. Call `Self::cloned_state()` to obtain a `BeaconState` cloned from this struct.
///
/// Step (2) happens prior to step (3) because some functionality requires an existing
/// `BeaconState`.
///
/// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to
/// allow access to `self.keypairs` and `self.spec`.
pub struct BeaconStateBuilder {
    pub validator_count: usize,
    pub state: Option<BeaconState>,
    pub genesis_time: u64,
    pub latest_eth1_data: Eth1Data,
    pub spec: ChainSpec,
    pub keypairs: Vec<Keypair>,
}

impl BeaconStateBuilder {
    /// Create a new builder with the given number of validators.
    pub fn new(validator_count: usize) -> Self {
        let genesis_time = 10_000_000;

        let latest_eth1_data = Eth1Data {
            deposit_root: Hash256::zero(),
            block_hash: Hash256::zero(),
        };

        let spec = ChainSpec::foundation();

        Self {
            validator_count,
            state: None,
            genesis_time,
            latest_eth1_data,
            spec,
            keypairs: vec![],
        }
    }

    /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function.
    ///
    /// Each validator is assigned a unique, randomly-generated keypair and all
    /// proof-of-possessions are verified during genesis.
    pub fn build(&mut self) -> Result<(), BeaconStateError> {
        self.keypairs = (0..self.validator_count)
            .collect::<Vec<usize>>()
            .iter()
            .map(|_| Keypair::random())
            .collect();

        let initial_validator_deposits = self
            .keypairs
            .iter()
            .map(|keypair| Deposit {
                branch: vec![], // branch verification is not specified.
                index: 0,       // index verification is not specified.
                deposit_data: DepositData {
                    amount: 32_000_000_000, // 32 ETH (in Gwei)
                    timestamp: self.genesis_time - 1,
                    deposit_input: DepositInput {
                        pubkey: keypair.pk.clone(),
                        withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
                        proof_of_possession: create_proof_of_possession(&keypair),
                    },
                },
            })
            .collect();

        let state = BeaconState::genesis(
            self.genesis_time,
            initial_validator_deposits,
            self.latest_eth1_data.clone(),
            &self.spec,
        )?;

        self.state = Some(state);

        Ok(())
    }

    /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function, without supplying any
    /// validators. Instead validators are added to the state post-genesis.
    ///
    /// One keypair is randomly generated and all validators are assigned this same keypair.
    /// Proof-of-possessions are not created (or validated).
    ///
    /// This function runs orders of magnitude faster than `Self::build()`, however it will be
    /// erroneous for functions which use a validator's public key as an identifier (e.g.,
    /// deposits).
    pub fn build_fast(&mut self) -> Result<(), BeaconStateError> {
        let common_keypair = Keypair::random();

        let mut validator_registry = Vec::with_capacity(self.validator_count);
        let mut validator_balances = Vec::with_capacity(self.validator_count);
        self.keypairs = Vec::with_capacity(self.validator_count);

        for _ in 0..self.validator_count {
            self.keypairs.push(common_keypair.clone());
            validator_balances.push(32_000_000_000);
            validator_registry.push(Validator {
                pubkey: common_keypair.pk.clone(),
                withdrawal_credentials: Hash256::zero(),
                activation_epoch: self.spec.genesis_epoch,
                ..Validator::default()
            })
        }

        let state = BeaconState {
            validator_registry,
            validator_balances,
            ..BeaconState::genesis(
                self.genesis_time,
                vec![],
                self.latest_eth1_data.clone(),
                &self.spec,
            )?
        };

        self.state = Some(state);

        Ok(())
    }

    /// Sets the `BeaconState` to be in the last slot of the given epoch.
    ///
    /// Sets all justification/finalization parameters to be as "perfect" as possible (i.e.,
    /// highest justified and finalized slots, full justification bitfield, etc).
    pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) {
        let state = self.state.as_mut().expect("Genesis required");

        let slot = epoch.end_slot(self.spec.slots_per_epoch);

        state.slot = slot;
        state.validator_registry_update_epoch = epoch - 1;

        state.previous_shuffling_epoch = epoch - 1;
        state.current_shuffling_epoch = epoch;

        state.previous_shuffling_seed = Hash256::from_low_u64_le(0);
        state.current_shuffling_seed = Hash256::from_low_u64_le(1);

        state.previous_justified_epoch = epoch - 2;
        state.justified_epoch = epoch - 1;
        state.justification_bitfield = u64::max_value();
        state.finalized_epoch = epoch - 1;
    }

    /// Creates a full set of attestations for the `BeaconState`. Each attestation has full
    /// participation from its committee and references the expected beacon_block hashes.
    ///
    /// These attestations should be fully conducive to justification and finalization.
    pub fn insert_attestations(&mut self) {
        let state = self.state.as_mut().expect("Genesis required");

        state
            .build_epoch_cache(RelativeEpoch::Previous, &self.spec)
            .unwrap();
        state
            .build_epoch_cache(RelativeEpoch::Current, &self.spec)
            .unwrap();

        let current_epoch = state.current_epoch(&self.spec);
        let previous_epoch = state.previous_epoch(&self.spec);
        let current_epoch_depth =
            (state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize();

        let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch);
        let current_epoch_slots = current_epoch
            .slot_iter(self.spec.slots_per_epoch)
            .take(current_epoch_depth);

        for slot in previous_epoch_slots.chain(current_epoch_slots) {
            let committees = state
                .get_crosslink_committees_at_slot(slot, &self.spec)
                .unwrap()
                .clone();

            for (committee, shard) in committees {
                state
                    .latest_attestations
                    .push(committee_to_pending_attestation(
                        state, &committee, shard, slot, &self.spec,
                    ))
            }
        }
    }

    /// Returns a cloned `BeaconState`.
    pub fn cloned_state(&self) -> BeaconState {
        self.state.as_ref().expect("Genesis required").clone()
    }
}

/// Builds a valid PendingAttestation with full participation for some committee.
fn committee_to_pending_attestation(
    state: &BeaconState,
    committee: &[usize],
    shard: u64,
    slot: Slot,
    spec: &ChainSpec,
) -> PendingAttestation {
    let current_epoch = state.current_epoch(spec);
    let previous_epoch = state.previous_epoch(spec);

    let mut aggregation_bitfield = Bitfield::new();
    let mut custody_bitfield = Bitfield::new();

    for (i, _) in committee.iter().enumerate() {
        aggregation_bitfield.set(i, true);
        custody_bitfield.set(i, true);
    }

    let is_previous_epoch =
        state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch);

    let justified_epoch = if is_previous_epoch {
        state.previous_justified_epoch
    } else {
        state.justified_epoch
    };

    let epoch_boundary_root = if is_previous_epoch {
        *state
            .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec)
            .unwrap()
    } else {
        *state
            .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec)
            .unwrap()
    };

    let justified_block_root = *state
        .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), &spec)
        .unwrap();

    PendingAttestation {
        aggregation_bitfield,
        data: AttestationData {
            slot,
            shard,
            beacon_block_root: *state.get_block_root(slot, spec).unwrap(),
            epoch_boundary_root,
            crosslink_data_root: Hash256::zero(),
            latest_crosslink: Crosslink {
                epoch: slot.epoch(spec.slots_per_epoch),
                crosslink_data_root: Hash256::zero(),
            },
            justified_epoch,
            justified_block_root,
        },
        custody_bitfield,
        inclusion_slot: slot,
    }
}
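A usage sketch that walks the four steps from the doc comment above, modelled on the `per_epoch_processing` test earlier in this diff; the final assertion is only a sanity check.

use types::beacon_state::BeaconStateBuilder;
use types::*;

fn main() {
    // 1. Create a new builder.
    let mut builder = BeaconStateBuilder::new(8);
    builder.spec = ChainSpec::few_validators();

    // 2. Generate a genesis `BeaconState` (`build_fast()` skips proof-of-possession work).
    builder.build().unwrap();

    // 3. (Optional) mutate the state with the helper methods.
    builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);

    // 4. Take a cloned state; the builder keeps `keypairs` and `spec` for later use.
    let state = builder.cloned_state();
    assert_eq!(
        state.slot,
        (builder.spec.genesis_epoch + 4).end_slot(builder.spec.slots_per_epoch)
    );
}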
eth2/types/src/beacon_state/epoch_cache.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
use super::{AttestationDutyMap, BeaconState, CrosslinkCommittees, Error, ShardCommitteeIndexMap};
use crate::{ChainSpec, Epoch};
use log::trace;
use serde_derive::Serialize;
use std::collections::HashMap;

#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct EpochCache {
    /// True if this cache has been initialized.
    pub initialized: bool,
    /// The crosslink committees for an epoch.
    pub committees: Vec<CrosslinkCommittees>,
    /// Maps validator index to a slot, shard and committee index for attestation.
    pub attestation_duty_map: AttestationDutyMap,
    /// Maps a shard to an index of `self.committees`.
    pub shard_committee_index_map: ShardCommitteeIndexMap,
}

impl EpochCache {
    pub fn empty() -> EpochCache {
        EpochCache {
            initialized: false,
            committees: vec![],
            attestation_duty_map: AttestationDutyMap::new(),
            shard_committee_index_map: ShardCommitteeIndexMap::new(),
        }
    }

    pub fn initialized(
        state: &BeaconState,
        epoch: Epoch,
        spec: &ChainSpec,
    ) -> Result<EpochCache, Error> {
        let mut epoch_committees: Vec<CrosslinkCommittees> =
            Vec::with_capacity(spec.slots_per_epoch as usize);
        let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
        let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new();

        let shuffling =
            state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?;

        for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() {
            let slot_committees = state.calculate_crosslink_committees_at_slot(
                slot,
                false,
                shuffling.clone(),
                spec,
            )?;

            for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() {
                // Empty committees are not permitted.
                if committee.is_empty() {
                    return Err(Error::InsufficientValidators);
                }

                trace!(
                    "shard: {}, epoch_i: {}, slot_i: {}",
                    shard,
                    epoch_committeess_index,
                    slot_committees_index
                );

                shard_committee_index_map
                    .insert(*shard, (epoch_committeess_index, slot_committees_index));

                for (committee_index, validator_index) in committee.iter().enumerate() {
                    attestation_duty_map.insert(
                        *validator_index as u64,
                        (slot, *shard, committee_index as u64),
                    );
                }
            }

            epoch_committees.push(slot_committees)
        }

        Ok(EpochCache {
            initialized: true,
            committees: epoch_committees,
            attestation_duty_map,
            shard_committee_index_map,
        })
    }
}
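The cache front-loads an epoch's shuffling so that later queries become hash-map lookups rather than recomputed committees. A rough usage sketch of the duty map it fills (assuming `AttestationDutyMap` is a `HashMap<u64, (Slot, u64, u64)>`, as the inserts above suggest; a plain `u64` stands in for `Slot` here):

    use std::collections::HashMap;

    fn main() {
        // Toy duty map in the shape the cache builds:
        // validator index -> (slot, shard, committee index).
        let mut attestation_duty_map: HashMap<u64, (u64, u64, u64)> = HashMap::new();

        // What the nested loops above insert for one committee member:
        // validator 7 attests at slot 32, shard 3, position 0 in its committee.
        attestation_duty_map.insert(7, (32, 3, 0));

        // A later "what is validator 7's duty?" query is now a single lookup.
        match attestation_duty_map.get(&7) {
            Some((slot, shard, index)) => {
                println!("validator 7: slot {}, shard {}, committee index {}", slot, shard, index)
            }
            None => println!("validator 7 has no attestation duty this epoch"),
        }
    }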
eth2/types/src/beacon_state/helpers.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
use crate::*;

/// Verify ``bitfield`` against the ``committee_size``.
///
/// Is named `verify_bitfield` in the spec.
///
/// Spec v0.4.0
pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool {
    if bitfield.num_bytes() != ((committee_size + 7) / 8) {
        return false;
    }

    for i in committee_size..(bitfield.num_bytes() * 8) {
        if bitfield.get(i).expect("Impossible due to previous check.") {
            return false;
        }
    }

    true
}
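The check has two parts: the byte length must be exactly ceil(committee_size / 8), and every padding bit at or beyond `committee_size` must be zero. A standalone sketch of the same rule over a raw byte slice (illustrative only; the real `Bitfield` type and its bit ordering may differ):

    // Same length rule as `verify_bitfield_length`, over plain bytes (LSB-first bits).
    fn verify_bitfield_length(bitfield: &[u8], committee_size: usize) -> bool {
        // 1. Exactly ceil(committee_size / 8) bytes.
        if bitfield.len() != (committee_size + 7) / 8 {
            return false;
        }
        // 2. Every padding bit past `committee_size` must be unset.
        for i in committee_size..(bitfield.len() * 8) {
            if (bitfield[i / 8] >> (i % 8)) & 1 == 1 {
                return false;
            }
        }
        true
    }

    fn main() {
        // committee_size = 10 needs 2 bytes; bits 10..16 must stay zero.
        assert!(verify_bitfield_length(&[0b1111_1111, 0b0000_0011], 10));
        assert!(!verify_bitfield_length(&[0b1111_1111, 0b0000_0111], 10)); // bit 10 set
        assert!(!verify_bitfield_length(&[0b1111_1111], 10)); // wrong length
    }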
Changes to the `BeaconState` tests (the hand-rolled `BeaconStateTestBuilder` is replaced by the shared `BeaconStateBuilder`):

@@ -2,75 +2,62 @@
 use super::*;
 use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-use crate::{
-    beacon_state::BeaconStateError, BeaconState, ChainSpec, Deposit, DepositData, DepositInput,
-    Eth1Data, Hash256, Keypair,
-};
-use bls::create_proof_of_possession;
+use crate::{BeaconState, ChainSpec};
 use ssz::{ssz_encode, Decodable};

-struct BeaconStateTestBuilder {
-    pub genesis_time: u64,
-    pub initial_validator_deposits: Vec<Deposit>,
-    pub latest_eth1_data: Eth1Data,
-    pub spec: ChainSpec,
-    pub keypairs: Vec<Keypair>,
-}
-
-impl BeaconStateTestBuilder {
-    pub fn with_random_validators(validator_count: usize) -> Self {
-        let genesis_time = 10_000_000;
-        let keypairs: Vec<Keypair> = (0..validator_count)
-            .collect::<Vec<usize>>()
-            .iter()
-            .map(|_| Keypair::random())
-            .collect();
-        let initial_validator_deposits = keypairs
-            .iter()
-            .map(|keypair| Deposit {
-                branch: vec![], // branch verification is not specified.
-                index: 0,       // index verification is not specified.
-                deposit_data: DepositData {
-                    amount: 32_000_000_000, // 32 ETH (in Gwei)
-                    timestamp: genesis_time - 1,
-                    deposit_input: DepositInput {
-                        pubkey: keypair.pk.clone(),
-                        withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
-                        proof_of_possession: create_proof_of_possession(&keypair),
-                    },
-                },
-            })
-            .collect();
-        let latest_eth1_data = Eth1Data {
-            deposit_root: Hash256::zero(),
-            block_hash: Hash256::zero(),
-        };
-        let spec = ChainSpec::foundation();
-
-        Self {
-            genesis_time,
-            initial_validator_deposits,
-            latest_eth1_data,
-            spec,
-            keypairs,
-        }
-    }
-
-    pub fn build(&self) -> Result<BeaconState, BeaconStateError> {
-        BeaconState::genesis(
-            self.genesis_time,
-            self.initial_validator_deposits.clone(),
-            self.latest_eth1_data.clone(),
-            &self.spec,
-        )
-    }
-}
-
 #[test]
 pub fn can_produce_genesis_block() {
-    let builder = BeaconStateTestBuilder::with_random_validators(2);
+    let mut builder = BeaconStateBuilder::new(2);
     builder.build().unwrap();
 }

+/// Tests that `get_attestation_participants` is consistent with the result of
+/// `get_crosslink_committees_at_slot` with a full bitfield.
+#[test]
+pub fn get_attestation_participants_consistency() {
+    let mut rng = XorShiftRng::from_seed([42; 16]);
+
+    let mut builder = BeaconStateBuilder::new(8);
+    builder.spec = ChainSpec::few_validators();
+
+    builder.build().unwrap();
+
+    let mut state = builder.cloned_state();
+    let spec = builder.spec.clone();
+
+    state
+        .build_epoch_cache(RelativeEpoch::Previous, &spec)
+        .unwrap();
+    state
+        .build_epoch_cache(RelativeEpoch::Current, &spec)
+        .unwrap();
+    state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap();
+
+    for slot in state
+        .slot
+        .epoch(spec.slots_per_epoch)
+        .slot_iter(spec.slots_per_epoch)
+    {
+        let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap();
+
+        for (committee, shard) in committees {
+            let mut attestation_data = AttestationData::random_for_test(&mut rng);
+            attestation_data.slot = slot;
+            attestation_data.shard = *shard;
+
+            let mut bitfield = Bitfield::new();
+            for (i, _) in committee.iter().enumerate() {
+                bitfield.set(i, true);
+            }
+
+            assert_eq!(
+                state
+                    .get_attestation_participants(&attestation_data, &bitfield, &spec)
+                    .unwrap(),
+                *committee
+            );
+        }
+    }
+}

 #[test]
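The property the new test asserts is that a bitfield with every committee bit set selects the whole committee. A toy version of just that selection step (the real `get_attestation_participants` also checks the bitfield length against the committee size, per `verify_bitfield_length` above):

    // Illustrative only: select the committee members whose bit is set.
    fn participants(committee: &[usize], bitfield: &[bool]) -> Vec<usize> {
        committee
            .iter()
            .enumerate()
            .filter(|(i, _)| bitfield.get(*i).copied().unwrap_or(false))
            .map(|(_, validator)| *validator)
            .collect()
    }

    fn main() {
        let committee = vec![10, 42, 7];

        // Full bitfield -> the participants are exactly the committee.
        let full = vec![true; committee.len()];
        assert_eq!(participants(&committee, &full), committee);

        // Partial bitfield -> only the flagged members.
        assert_eq!(participants(&committee, &[true, false, true]), vec![10, 7]);
    }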
Deleted file (the `CasperSlashing` type):

@@ -1,60 +0,0 @@
-use super::SlashableVoteData;
-use crate::test_utils::TestRandom;
-use rand::RngCore;
-use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
-
-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
-pub struct CasperSlashing {
-    pub slashable_vote_data_1: SlashableVoteData,
-    pub slashable_vote_data_2: SlashableVoteData,
-}
-
-impl TreeHash for CasperSlashing {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.slashable_vote_data_1.hash_tree_root_internal());
-        result.append(&mut self.slashable_vote_data_2.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for CasperSlashing {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            slashable_vote_data_1: <_>::random_for_test(rng),
-            slashable_vote_data_2: <_>::random_for_test(rng),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
-
-    #[test]
-    pub fn test_ssz_round_trip() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = CasperSlashing::random_for_test(&mut rng);
-
-        let bytes = ssz_encode(&original);
-        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
-
-        assert_eq!(original, decoded);
-    }
-
-    #[test]
-    pub fn test_hash_tree_root_internal() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = CasperSlashing::random_for_test(&mut rng);
-
-        let result = original.hash_tree_root_internal();
-
-        assert_eq!(result.len(), 32);
-        // TODO: Add further tests
-        // https://github.com/sigp/lighthouse/issues/170
-    }
-}
Changes to `ChainSpec` (the BeaconChain constants):

@@ -1,11 +1,20 @@
-use crate::{Address, Epoch, Hash256, Slot};
+use crate::{Address, Epoch, Fork, Hash256, Slot};
 use bls::Signature;

 const GWEI: u64 = 1_000_000_000;

+pub enum Domain {
+    Deposit,
+    Attestation,
+    Proposal,
+    Exit,
+    Randao,
+    Transfer,
+}
+
 /// Holds all the "constants" for a BeaconChain.
 ///
-/// Spec v0.2.0
+/// Spec v0.4.0
 #[derive(PartialEq, Debug, Clone)]
 pub struct ChainSpec {
     /*
@@ -16,7 +25,7 @@ pub struct ChainSpec {
     pub max_balance_churn_quotient: u64,
     pub beacon_chain_shard_number: u64,
     pub max_indices_per_slashable_vote: u64,
-    pub max_withdrawals_per_epoch: u64,
+    pub max_exit_dequeues_per_epoch: u64,
     pub shuffle_round_count: u8,

     /*
@@ -48,29 +57,30 @@ pub struct ChainSpec {
     /*
      * Time parameters
      */
-    pub slot_duration: u64,
+    pub seconds_per_slot: u64,
     pub min_attestation_inclusion_delay: u64,
-    pub epoch_length: u64,
+    pub slots_per_epoch: u64,
-    pub seed_lookahead: Epoch,
+    pub min_seed_lookahead: Epoch,
-    pub entry_exit_delay: u64,
+    pub activation_exit_delay: u64,
-    pub eth1_data_voting_period: u64,
+    pub epochs_per_eth1_voting_period: u64,
-    pub min_validator_withdrawal_epochs: Epoch,
+    pub min_validator_withdrawability_delay: Epoch,

     /*
      * State list lengths
      */
     pub latest_block_roots_length: usize,
     pub latest_randao_mixes_length: usize,
-    pub latest_index_roots_length: usize,
+    pub latest_active_index_roots_length: usize,
-    pub latest_penalized_exit_length: usize,
+    pub latest_slashed_exit_length: usize,

     /*
      * Reward and penalty quotients
      */
     pub base_reward_quotient: u64,
     pub whistleblower_reward_quotient: u64,
-    pub includer_reward_quotient: u64,
+    pub attestation_inclusion_reward_quotient: u64,
     pub inactivity_penalty_quotient: u64,
+    pub min_penalty_quotient: u64,

     /*
      * Max operations per block
@@ -79,29 +89,63 @@ pub struct ChainSpec {
     pub max_attester_slashings: u64,
     pub max_attestations: u64,
     pub max_deposits: u64,
-    pub max_exits: u64,
+    pub max_voluntary_exits: u64,
+    pub max_transfers: u64,

     /*
      * Signature domains
+     *
+     * Fields should be private to prevent accessing a domain that hasn't been modified to suit
+     * some `Fork`.
+     *
+     * Use `ChainSpec::get_domain(..)` to access these values.
      */
-    pub domain_deposit: u64,
-    pub domain_attestation: u64,
-    pub domain_proposal: u64,
-    pub domain_exit: u64,
-    pub domain_randao: u64,
+    domain_deposit: u64,
+    domain_attestation: u64,
+    domain_proposal: u64,
+    domain_exit: u64,
+    domain_randao: u64,
+    domain_transfer: u64,
 }

 impl ChainSpec {
-    /// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation.
+    /// Return the number of committees in one epoch.
     ///
-    /// Of course, the actual foundation specs are unknown at this point so these are just a rough
-    /// estimate.
+    /// Spec v0.4.0
+    pub fn get_epoch_committee_count(&self, active_validator_count: usize) -> u64 {
+        std::cmp::max(
+            1,
+            std::cmp::min(
+                self.shard_count / self.slots_per_epoch,
+                active_validator_count as u64 / self.slots_per_epoch / self.target_committee_size,
+            ),
+        ) * self.slots_per_epoch
+    }
+
+    /// Get the domain number that represents the fork meta and signature domain.
     ///
-    /// Spec v0.2.0
+    /// Spec v0.4.0
+    pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 {
+        let domain_constant = match domain {
+            Domain::Deposit => self.domain_deposit,
+            Domain::Attestation => self.domain_attestation,
+            Domain::Proposal => self.domain_proposal,
+            Domain::Exit => self.domain_exit,
+            Domain::Randao => self.domain_randao,
+            Domain::Transfer => self.domain_transfer,
+        };
+
+        let fork_version = fork.get_fork_version(epoch);
+        fork_version * u64::pow(2, 32) + domain_constant
+    }
+
+    /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
+    ///
+    /// Spec v0.4.0
     pub fn foundation() -> Self {
-        let genesis_slot = Slot::new(2_u64.pow(19));
-        let epoch_length = 64;
-        let genesis_epoch = genesis_slot.epoch(epoch_length);
+        let genesis_slot = Slot::new(2_u64.pow(32));
+        let slots_per_epoch = 64;
+        let genesis_epoch = genesis_slot.epoch(slots_per_epoch);

         Self {
             /*
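The new `get_domain` packs the fork version into the upper 32 bits of a `u64` and the per-domain constant into the lower 32 bits, while `get_epoch_committee_count` clamps the per-slot committee count between 1 and `shard_count / slots_per_epoch` before scaling back up by `slots_per_epoch`. A standalone sketch of that arithmetic; the inputs below are made up for illustration, not values taken from this diff:

    fn main() {
        // Domain packing: fork_version * 2^32 + domain_constant.
        let fork_version: u64 = 1; // hypothetical fork version
        let domain_proposal: u64 = 2; // the constant `foundation()` uses for proposals
        let domain = fork_version * u64::pow(2, 32) + domain_proposal;
        assert_eq!(domain, 0x0000_0001_0000_0002);

        // Committee count: clamp the per-slot count, then scale by slots_per_epoch.
        let shard_count: u64 = 1024; // hypothetical
        let slots_per_epoch: u64 = 64;
        let target_committee_size: u64 = 128; // hypothetical
        let active_validators: u64 = 10_000; // hypothetical
        let per_slot = std::cmp::max(
            1,
            std::cmp::min(
                shard_count / slots_per_epoch,
                active_validators / slots_per_epoch / target_committee_size,
            ),
        );
        // 10_000 validators is too few for more than one committee per slot.
        assert_eq!(per_slot * slots_per_epoch, 64);
    }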
@@ -112,7 +156,7 @@ impl ChainSpec {
            max_balance_churn_quotient: 32,
            beacon_chain_shard_number: u64::max_value(),
            max_indices_per_slashable_vote: 4_096,
-           max_withdrawals_per_epoch: 4,
+           max_exit_dequeues_per_epoch: 4,
            shuffle_round_count: 90,

            /*
@@ -133,7 +177,7 @@ impl ChainSpec {
             * Initial Values
             */
            genesis_fork_version: 0,
-           genesis_slot: Slot::new(2_u64.pow(19)),
+           genesis_slot,
            genesis_epoch,
            genesis_start_shard: 0,
            far_future_epoch: Epoch::new(u64::max_value()),
@@ -144,29 +188,30 @@ impl ChainSpec {
            /*
             * Time parameters
             */
-           slot_duration: 6,
+           seconds_per_slot: 6,
            min_attestation_inclusion_delay: 4,
-           epoch_length,
+           slots_per_epoch,
-           seed_lookahead: Epoch::new(1),
+           min_seed_lookahead: Epoch::new(1),
-           entry_exit_delay: 4,
+           activation_exit_delay: 4,
-           eth1_data_voting_period: 16,
+           epochs_per_eth1_voting_period: 16,
-           min_validator_withdrawal_epochs: Epoch::new(256),
+           min_validator_withdrawability_delay: Epoch::new(256),

            /*
             * State list lengths
             */
            latest_block_roots_length: 8_192,
            latest_randao_mixes_length: 8_192,
-           latest_index_roots_length: 8_192,
+           latest_active_index_roots_length: 8_192,
-           latest_penalized_exit_length: 8_192,
+           latest_slashed_exit_length: 8_192,

            /*
             * Reward and penalty quotients
             */
            base_reward_quotient: 32,
            whistleblower_reward_quotient: 512,
-           includer_reward_quotient: 8,
+           attestation_inclusion_reward_quotient: 8,
            inactivity_penalty_quotient: 16_777_216,
+           min_penalty_quotient: 32,

            /*
             * Max operations per block
@@ -175,7 +220,8 @@ impl ChainSpec {
            max_attester_slashings: 1,
            max_attestations: 128,
            max_deposits: 16,
-           max_exits: 16,
+           max_voluntary_exits: 16,
+           max_transfers: 16,

            /*
             * Signature domains
@@ -185,25 +231,24 @@ impl ChainSpec {
            domain_proposal: 2,
            domain_exit: 3,
            domain_randao: 4,
+           domain_transfer: 5,
        }
    }
-}
-
-impl ChainSpec {
+
    /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators.
    ///
-   /// Spec v0.2.0
+   /// Spec v0.4.0
    pub fn few_validators() -> Self {
-       let genesis_slot = Slot::new(2_u64.pow(19));
-       let epoch_length = 8;
-       let genesis_epoch = genesis_slot.epoch(epoch_length);
+       let genesis_slot = Slot::new(2_u64.pow(32));
+       let slots_per_epoch = 8;
+       let genesis_epoch = genesis_slot.epoch(slots_per_epoch);

        Self {
-           shard_count: 1,
+           shard_count: 8,
            target_committee_size: 1,
            genesis_slot,
            genesis_epoch,
-           epoch_length,
+           slots_per_epoch,
            ..ChainSpec::foundation()
        }
    }
 }
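As before, `few_validators()` only overrides the fields that matter for a tiny test network and takes every other constant from `foundation()` via struct update syntax (`..ChainSpec::foundation()`). A minimal sketch of that pattern on a stand-in struct (names and values are illustrative):

    #[derive(Debug, Clone)]
    struct Preset {
        shard_count: u64,
        target_committee_size: u64,
        slots_per_epoch: u64,
        seconds_per_slot: u64,
    }

    impl Preset {
        fn foundation() -> Self {
            Preset {
                shard_count: 1024,
                target_committee_size: 128,
                slots_per_epoch: 64,
                seconds_per_slot: 6,
            }
        }

        fn few_validators() -> Self {
            // Override three fields; `..` fills the rest from `foundation()`.
            Preset {
                shard_count: 8,
                target_committee_size: 1,
                slots_per_epoch: 8,
                ..Preset::foundation()
            }
        }
    }

    fn main() {
        let spec = Preset::few_validators();
        assert_eq!(spec.slots_per_epoch, 8);
        assert_eq!(spec.seconds_per_slot, 6); // inherited from foundation()
    }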
Changes to the `Crosslink` type:

@@ -2,48 +2,25 @@ use crate::test_utils::TestRandom;
 use crate::{Epoch, Hash256};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode)]
+/// Specifies the block hash for a shard at an epoch.
+///
+/// Spec v0.4.0
+#[derive(
+    Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom,
+)]
 pub struct Crosslink {
     pub epoch: Epoch,
-    pub shard_block_root: Hash256,
+    pub crosslink_data_root: Hash256,
 }

-impl Crosslink {
-    /// Generates a new instance where `dynasty` and `hash` are both zero.
-    pub fn zero() -> Self {
-        Self {
-            epoch: Epoch::new(0),
-            shard_block_root: Hash256::zero(),
-        }
-    }
-}
-
-impl TreeHash for Crosslink {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.epoch.hash_tree_root_internal());
-        result.append(&mut self.shard_block_root.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Crosslink {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            epoch: <_>::random_for_test(rng),
-            shard_block_root: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
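This file sets the pattern repeated in the files below: the hand-written `TreeHash` and `TestRandom` impls are dropped in favour of `#[derive(TreeHash, TestRandom)]` from `ssz_derive` and `test_random_derive`. Judging from the impls being removed, the derives generate the same shape of code: fold each field's tree root in declaration order, and fill each field from the RNG. A toy, self-contained sketch of that shape (none of these names are the real crates' API):

    // Stand-ins for the real traits; only the *shape* of the generated code matters here.
    trait ToyTreeHash {
        fn hash_tree_root_internal(&self) -> Vec<u8>;
    }

    impl ToyTreeHash for u64 {
        fn hash_tree_root_internal(&self) -> Vec<u8> {
            self.to_le_bytes().to_vec()
        }
    }

    // Stand-in for the real `hash` function.
    fn hash(bytes: &[u8]) -> Vec<u8> {
        bytes.to_vec()
    }

    struct ToyCrosslink {
        epoch: u64,
        crosslink_data_root: u64,
    }

    // Roughly what #[derive(TreeHash)] would produce for a two-field struct,
    // mirroring the hand-written impl removed above.
    impl ToyTreeHash for ToyCrosslink {
        fn hash_tree_root_internal(&self) -> Vec<u8> {
            let mut result: Vec<u8> = vec![];
            result.append(&mut self.epoch.hash_tree_root_internal());
            result.append(&mut self.crosslink_data_root.hash_tree_root_internal());
            hash(&result)
        }
    }

    fn main() {
        let c = ToyCrosslink { epoch: 3, crosslink_data_root: 7 };
        assert_eq!(c.hash_tree_root_internal().len(), 16); // 2 fields x 8 bytes
    }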
Changes to the `Deposit` type:

@@ -2,41 +2,24 @@ use super::{DepositData, Hash256};
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+/// A deposit to potentially become a beacon chain validator.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct Deposit {
     pub branch: Vec<Hash256>,
     pub index: u64,
     pub deposit_data: DepositData,
 }

-impl TreeHash for Deposit {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.branch.hash_tree_root_internal());
-        result.append(&mut self.index.hash_tree_root_internal());
-        result.append(&mut self.deposit_data.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Deposit {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            branch: <_>::random_for_test(rng),
-            index: <_>::random_for_test(rng),
-            deposit_data: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
Changes to the `DepositData` type:

@@ -2,41 +2,24 @@ use super::DepositInput;
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+/// Data generated by the deposit contract.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct DepositData {
     pub amount: u64,
     pub timestamp: u64,
     pub deposit_input: DepositInput,
 }

-impl TreeHash for DepositData {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.amount.hash_tree_root_internal());
-        result.append(&mut self.timestamp.hash_tree_root_internal());
-        result.append(&mut self.deposit_input.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for DepositData {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            amount: <_>::random_for_test(rng),
-            timestamp: <_>::random_for_test(rng),
-            deposit_input: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
Changes to the `DepositInput` type:

@@ -3,41 +3,24 @@ use crate::test_utils::TestRandom;
 use bls::{PublicKey, Signature};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+/// The data supplied by the user to the deposit contract.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct DepositInput {
     pub pubkey: PublicKey,
     pub withdrawal_credentials: Hash256,
     pub proof_of_possession: Signature,
 }

-impl TreeHash for DepositInput {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.pubkey.hash_tree_root_internal());
-        result.append(&mut self.withdrawal_credentials.hash_tree_root_internal());
-        result.append(&mut self.proof_of_possession.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for DepositInput {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            pubkey: <_>::random_for_test(rng),
-            withdrawal_credentials: <_>::random_for_test(rng),
-            proof_of_possession: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
Changes to the `Eth1Data` type:

@@ -2,39 +2,23 @@ use super::Hash256;
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-// Note: this is refer to as DepositRootVote in specs
-#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
+/// Contains data obtained from the Eth1 chain.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct Eth1Data {
     pub deposit_root: Hash256,
     pub block_hash: Hash256,
 }

-impl TreeHash for Eth1Data {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.deposit_root.hash_tree_root_internal());
-        result.append(&mut self.block_hash.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Eth1Data {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            deposit_root: <_>::random_for_test(rng),
-            block_hash: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
Changes to the `Eth1DataVote` type:

@@ -2,39 +2,23 @@ use super::Eth1Data;
 use crate::test_utils::TestRandom;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-// Note: this is refer to as DepositRootVote in specs
-#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
+/// A summation of votes for some `Eth1Data`.
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct Eth1DataVote {
     pub eth1_data: Eth1Data,
     pub vote_count: u64,
 }

-impl TreeHash for Eth1DataVote {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.eth1_data.hash_tree_root_internal());
-        result.append(&mut self.vote_count.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Eth1DataVote {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            eth1_data: <_>::random_for_test(rng),
-            vote_count: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
Deleted file (the `Exit` type):

@@ -1,63 +0,0 @@
-use crate::{test_utils::TestRandom, Epoch};
-use bls::Signature;
-use rand::RngCore;
-use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
-
-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
-pub struct Exit {
-    pub epoch: Epoch,
-    pub validator_index: u64,
-    pub signature: Signature,
-}
-
-impl TreeHash for Exit {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.epoch.hash_tree_root_internal());
-        result.append(&mut self.validator_index.hash_tree_root_internal());
-        result.append(&mut self.signature.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Exit {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            epoch: <_>::random_for_test(rng),
-            validator_index: <_>::random_for_test(rng),
-            signature: <_>::random_for_test(rng),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
-
-    #[test]
-    pub fn test_ssz_round_trip() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = Exit::random_for_test(&mut rng);
-
-        let bytes = ssz_encode(&original);
-        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
-
-        assert_eq!(original, decoded);
-    }
-
-    #[test]
-    pub fn test_hash_tree_root_internal() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = Exit::random_for_test(&mut rng);
-
-        let result = original.hash_tree_root_internal();
-
-        assert_eq!(result.len(), 32);
-        // TODO: Add further tests
-        // https://github.com/sigp/lighthouse/issues/170
-    }
-}
Changes to the `Fork` type:

@@ -1,33 +1,28 @@
 use crate::{test_utils::TestRandom, Epoch};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode)]
+/// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
+///
+/// Spec v0.4.0
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct Fork {
     pub previous_version: u64,
     pub current_version: u64,
     pub epoch: Epoch,
 }

-impl TreeHash for Fork {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.previous_version.hash_tree_root_internal());
-        result.append(&mut self.current_version.hash_tree_root_internal());
-        result.append(&mut self.epoch.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for Fork {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            previous_version: <_>::random_for_test(rng),
-            current_version: <_>::random_for_test(rng),
-            epoch: <_>::random_for_test(rng),
-        }
-    }
-}
+impl Fork {
+    /// Return the fork version of the given ``epoch``.
+    ///
+    /// Spec v0.4.0
+    pub fn get_fork_version(&self, epoch: Epoch) -> u64 {
+        if epoch < self.epoch {
+            return self.previous_version;
+        }
+
+        self.current_version
+    }
+}
@@ -35,7 +30,7 @@ impl<T: RngCore> TestRandom<T> for Fork {
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
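`get_fork_version` returns the previous version strictly before the fork epoch and the current version from the fork epoch onwards; `get_domain` in `ChainSpec` then packs that version into the signature domain. A small sketch of the boundary behaviour, with plain `u64`s standing in for the `Epoch` newtype:

    struct Fork {
        previous_version: u64,
        current_version: u64,
        epoch: u64, // epoch at which current_version takes effect
    }

    impl Fork {
        fn get_fork_version(&self, epoch: u64) -> u64 {
            if epoch < self.epoch {
                return self.previous_version;
            }
            self.current_version
        }
    }

    fn main() {
        let fork = Fork { previous_version: 0, current_version: 1, epoch: 100 };
        assert_eq!(fork.get_fork_version(99), 0);  // strictly before the fork epoch
        assert_eq!(fork.get_fork_version(100), 1); // at the fork epoch
        assert_eq!(fork.get_fork_version(150), 1); // after the fork epoch
    }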
Changes to the crate root's module declarations and re-exports:

@@ -7,7 +7,6 @@ pub mod attester_slashing;
 pub mod beacon_block;
 pub mod beacon_block_body;
 pub mod beacon_state;
-pub mod casper_slashing;
 pub mod chain_spec;
 pub mod crosslink;
 pub mod deposit;
@@ -15,23 +14,22 @@ pub mod deposit_data;
 pub mod deposit_input;
 pub mod eth1_data;
 pub mod eth1_data_vote;
-pub mod exit;
 pub mod fork;
 pub mod free_attestation;
 pub mod pending_attestation;
-pub mod proposal_signed_data;
+pub mod proposal;
 pub mod proposer_slashing;
 pub mod readers;
 pub mod shard_reassignment_record;
 pub mod slashable_attestation;
-pub mod slashable_vote_data;
+pub mod transfer;
+pub mod voluntary_exit;
 #[macro_use]
 pub mod slot_epoch_macros;
 pub mod slot_epoch;
 pub mod slot_height;
 pub mod validator;
 pub mod validator_registry;
-pub mod validator_registry_delta_block;

 use ethereum_types::{H160, H256, U256};
 use std::collections::HashMap;
@@ -42,27 +40,25 @@ pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit;
 pub use crate::attester_slashing::AttesterSlashing;
 pub use crate::beacon_block::BeaconBlock;
 pub use crate::beacon_block_body::BeaconBlockBody;
-pub use crate::beacon_state::BeaconState;
-pub use crate::casper_slashing::CasperSlashing;
-pub use crate::chain_spec::ChainSpec;
+pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch};
+pub use crate::chain_spec::{ChainSpec, Domain};
 pub use crate::crosslink::Crosslink;
 pub use crate::deposit::Deposit;
 pub use crate::deposit_data::DepositData;
 pub use crate::deposit_input::DepositInput;
 pub use crate::eth1_data::Eth1Data;
 pub use crate::eth1_data_vote::Eth1DataVote;
-pub use crate::exit::Exit;
 pub use crate::fork::Fork;
 pub use crate::free_attestation::FreeAttestation;
 pub use crate::pending_attestation::PendingAttestation;
-pub use crate::proposal_signed_data::ProposalSignedData;
+pub use crate::proposal::Proposal;
 pub use crate::proposer_slashing::ProposerSlashing;
 pub use crate::slashable_attestation::SlashableAttestation;
-pub use crate::slashable_vote_data::SlashableVoteData;
 pub use crate::slot_epoch::{Epoch, Slot};
 pub use crate::slot_height::SlotHeight;
-pub use crate::validator::{StatusFlags as ValidatorStatusFlags, Validator};
-pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock;
+pub use crate::transfer::Transfer;
+pub use crate::validator::Validator;
+pub use crate::voluntary_exit::VoluntaryExit;

 pub type Hash256 = H256;
 pub type Address = H160;
Changes to the `PendingAttestation` type:

@@ -2,10 +2,13 @@ use crate::test_utils::TestRandom;
 use crate::{AttestationData, Bitfield, Slot};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode)]
+/// An attestation that has been included in the state but not yet fully processed.
+///
+/// Spec v0.4.0
+#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct PendingAttestation {
     pub aggregation_bitfield: Bitfield,
     pub data: AttestationData,
@@ -13,33 +16,11 @@ pub struct PendingAttestation {
     pub inclusion_slot: Slot,
 }

-impl TreeHash for PendingAttestation {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.aggregation_bitfield.hash_tree_root_internal());
-        result.append(&mut self.data.hash_tree_root_internal());
-        result.append(&mut self.custody_bitfield.hash_tree_root_internal());
-        result.append(&mut self.inclusion_slot.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for PendingAttestation {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            data: <_>::random_for_test(rng),
-            aggregation_bitfield: <_>::random_for_test(rng),
-            custody_bitfield: <_>::random_for_test(rng),
-            inclusion_slot: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
eth2/types/src/proposal.rs (new file, 78 lines)
@@ -0,0 +1,78 @@
use crate::test_utils::TestRandom;
use crate::{Hash256, Slot};
use bls::Signature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;

/// A proposal for some shard or beacon block.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct Proposal {
    pub slot: Slot,
    /// Shard number (spec.beacon_chain_shard_number for beacon chain)
    pub shard: u64,
    pub block_root: Hash256,
    pub signature: Signature,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
    use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash};

    #[test]
    pub fn test_ssz_round_trip() {
        let mut rng = XorShiftRng::from_seed([42; 16]);
        let original = Proposal::random_for_test(&mut rng);

        let bytes = ssz_encode(&original);
        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();

        assert_eq!(original, decoded);
    }

    #[test]
    pub fn test_hash_tree_root_internal() {
        let mut rng = XorShiftRng::from_seed([42; 16]);
        let original = Proposal::random_for_test(&mut rng);

        let result = original.hash_tree_root_internal();

        assert_eq!(result.len(), 32);
        // TODO: Add further tests
        // https://github.com/sigp/lighthouse/issues/170
    }

    #[derive(TreeHash)]
    struct SignedProposal {
        pub slot: Slot,
        pub shard: u64,
        pub block_root: Hash256,
    }

    impl Into<SignedProposal> for Proposal {
        fn into(self) -> SignedProposal {
            SignedProposal {
                slot: self.slot,
                shard: self.shard,
                block_root: self.block_root,
            }
        }
    }

    #[test]
    pub fn test_signed_root() {
        let mut rng = XorShiftRng::from_seed([42; 16]);
        let original = Proposal::random_for_test(&mut rng);

        let other: SignedProposal = original.clone().into();

        assert_eq!(original.signed_root(), other.hash_tree_root());
    }
}
Changes to the `ProposalSignedData` type:

@@ -2,62 +2,19 @@ use crate::test_utils::TestRandom;
 use crate::{Hash256, Slot};
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode)]
+#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct ProposalSignedData {
     pub slot: Slot,
     pub shard: u64,
     pub block_root: Hash256,
 }

-impl TreeHash for ProposalSignedData {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.slot.hash_tree_root_internal());
-        result.append(&mut self.shard.hash_tree_root_internal());
-        result.append(&mut self.block_root.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for ProposalSignedData {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            slot: <_>::random_for_test(rng),
-            shard: <_>::random_for_test(rng),
-            block_root: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};

-    #[test]
-    pub fn test_ssz_round_trip() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = ProposalSignedData::random_for_test(&mut rng);
-
-        let bytes = ssz_encode(&original);
-        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
-
-        assert_eq!(original, decoded);
-    }
-
-    #[test]
-    pub fn test_hash_tree_root_internal() {
-        let mut rng = XorShiftRng::from_seed([42; 16]);
-        let original = ProposalSignedData::random_for_test(&mut rng);
-
-        let result = original.hash_tree_root_internal();
-
-        assert_eq!(result.len(), 32);
-        // TODO: Add further tests
-        // https://github.com/sigp/lighthouse/issues/170
-    }
+    ssz_tests!(ProposalSignedData);
 }
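The hand-written round-trip and tree-hash tests are replaced by a single `ssz_tests!(ProposalSignedData);` invocation. The macro itself is defined elsewhere in the crate and is not shown in this diff; judging from the tests it replaces, a macro of roughly this shape would generate the same two tests (a hypothetical reconstruction, not the actual definition):

    // Hypothetical: what an `ssz_tests!` macro could expand to, based on the
    // hand-written tests removed in this commit.
    macro_rules! ssz_tests {
        ($type: ty) => {
            #[test]
            pub fn test_ssz_round_trip() {
                use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
                use ssz::{ssz_encode, Decodable};

                let mut rng = XorShiftRng::from_seed([42; 16]);
                let original = <$type>::random_for_test(&mut rng);

                let bytes = ssz_encode(&original);
                let (decoded, _): ($type, usize) = <_>::ssz_decode(&bytes, 0).unwrap();

                assert_eq!(original, decoded);
            }

            #[test]
            pub fn test_hash_tree_root_internal() {
                use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
                use ssz::TreeHash;

                let mut rng = XorShiftRng::from_seed([42; 16]);
                let original = <$type>::random_for_test(&mut rng);

                assert_eq!(original.hash_tree_root_internal().len(), 32);
            }
        };
    }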
Changes to the `ProposerSlashing` type:

@@ -1,49 +1,29 @@
-use super::ProposalSignedData;
+use super::Proposal;
 use crate::test_utils::TestRandom;
-use bls::Signature;
 use rand::RngCore;
 use serde_derive::Serialize;
-use ssz::{hash, TreeHash};
-use ssz_derive::{Decode, Encode};
+use ssz_derive::{Decode, Encode, TreeHash};
+use test_random_derive::TestRandom;

-#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode)]
+mod builder;
+
+pub use builder::ProposerSlashingBuilder;
+
+/// Two conflicting proposals from the same proposer (validator).
+///
+/// Spec v0.4.0
+#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct ProposerSlashing {
     pub proposer_index: u64,
-    pub proposal_data_1: ProposalSignedData,
-    pub proposal_signature_1: Signature,
-    pub proposal_data_2: ProposalSignedData,
-    pub proposal_signature_2: Signature,
+    pub proposal_1: Proposal,
+    pub proposal_2: Proposal,
 }

-impl TreeHash for ProposerSlashing {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut result: Vec<u8> = vec![];
-        result.append(&mut self.proposer_index.hash_tree_root_internal());
-        result.append(&mut self.proposal_data_1.hash_tree_root_internal());
-        result.append(&mut self.proposal_signature_1.hash_tree_root_internal());
-        result.append(&mut self.proposal_data_2.hash_tree_root_internal());
-        result.append(&mut self.proposal_signature_2.hash_tree_root_internal());
-        hash(&result)
-    }
-}
-
-impl<T: RngCore> TestRandom<T> for ProposerSlashing {
-    fn random_for_test(rng: &mut T) -> Self {
-        Self {
-            proposer_index: <_>::random_for_test(rng),
-            proposal_data_1: <_>::random_for_test(rng),
-            proposal_signature_1: <_>::random_for_test(rng),
-            proposal_data_2: <_>::random_for_test(rng),
-            proposal_signature_2: <_>::random_for_test(rng),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use ssz::{ssz_encode, Decodable};
+    use ssz::{ssz_encode, Decodable, TreeHash};

     #[test]
     pub fn test_ssz_round_trip() {
eth2/types/src/proposer_slashing/builder.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
use crate::*;
use ssz::SignedRoot;

/// Builds a `ProposerSlashing`.
pub struct ProposerSlashingBuilder();

impl ProposerSlashingBuilder {
    /// Builds a `ProposerSlashing` that is a double vote.
    ///
    /// The `signer` function is used to sign the double-vote and accepts:
    ///
    /// - `validator_index: u64`
    /// - `message: &[u8]`
    /// - `epoch: Epoch`
    /// - `domain: Domain`
    ///
    /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`).
    pub fn double_vote<F>(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing
    where
        F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
    {
        let slot = Slot::new(0);
        let shard = 0;

        let mut proposal_1 = Proposal {
            slot,
            shard,
            block_root: Hash256::from_low_u64_le(1),
            signature: Signature::empty_signature(),
        };

        let mut proposal_2 = Proposal {
            slot,
            shard,
            block_root: Hash256::from_low_u64_le(2),
            signature: Signature::empty_signature(),
        };

        proposal_1.signature = {
            let message = proposal_1.signed_root();
            let epoch = slot.epoch(spec.slots_per_epoch);
            signer(proposer_index, &message[..], epoch, Domain::Proposal)
        };

        proposal_2.signature = {
            let message = proposal_2.signed_root();
            let epoch = slot.epoch(spec.slots_per_epoch);
            signer(proposer_index, &message[..], epoch, Domain::Proposal)
        };

        ProposerSlashing {
            proposer_index,
            proposal_1,
            proposal_2,
        }
    }
}
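A hedged usage sketch of the builder: construct a double vote for one proposer on the small test spec, signing each proposal's `signed_root()` under the proposal domain. The closure body assumes a `Signature::new(message, domain, &secret_key)` constructor and a `keypair.sk` field in the local `bls` crate; neither is confirmed by this diff, so treat them as placeholders for whatever signing helper is actually available:

    // Assumes the eth2 `types` and `bls` crates are in scope.
    fn build_example_slashing() -> ProposerSlashing {
        let spec = ChainSpec::few_validators();
        let fork = Fork::default();
        let keypair = Keypair::random();

        ProposerSlashingBuilder::double_vote(
            42,
            |_proposer_index, message, epoch, domain| {
                // Map the `Domain` variant to its packed u64 form, then sign.
                let domain = spec.get_domain(epoch, domain, &fork);
                // Placeholder signing call; substitute the real helper.
                Signature::new(message, domain, &keypair.sk)
            },
            &spec,
        )
    }

The two proposals built above share a slot and shard but commit to different block roots, which is exactly the double-vote condition the slashing is meant to prove.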
Changes to the `BeaconBlockReader` trait:

@@ -13,7 +13,6 @@ pub trait BeaconBlockReader: Debug + PartialEq {
     fn slot(&self) -> Slot;
     fn parent_root(&self) -> Hash256;
     fn state_root(&self) -> Hash256;
-    fn canonical_root(&self) -> Hash256;
     fn into_beacon_block(self) -> Option<BeaconBlock>;
 }

@@ -30,10 +29,6 @@ impl BeaconBlockReader for BeaconBlock {
         self.state_root
     }

-    fn canonical_root(&self) -> Hash256 {
-        self.canonical_root()
-    }
-
     fn into_beacon_block(self) -> Option<BeaconBlock> {
         Some(self)
     }
Changes to the `BeaconStateReader` trait:

@@ -1,4 +1,4 @@
-use crate::{BeaconState, Hash256, Slot};
+use crate::{BeaconState, Slot};
 use std::fmt::Debug;

 /// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`.
@@ -11,7 +11,6 @@ use std::fmt::Debug;
 /// "future proofing".
 pub trait BeaconStateReader: Debug + PartialEq {
     fn slot(&self) -> Slot;
-    fn canonical_root(&self) -> Hash256;
     fn into_beacon_state(self) -> Option<BeaconState>;
 }

@@ -20,10 +19,6 @@ impl BeaconStateReader for BeaconState {
         self.slot
     }

-    fn canonical_root(&self) -> Hash256 {
-        self.canonical_root()
-    }
-
     fn into_beacon_state(self) -> Option<BeaconState> {
         Some(self)
     }
Some files were not shown because too many files have changed in this diff.