Merge branch 'master' into v0.4.0-types

This commit is contained in:
Paul Hauner 2019-03-07 12:03:27 +11:00
commit 93ce7b59e7
No known key found for this signature in database
GPG Key ID: D362883A9218FCC6
35 changed files with 1842 additions and 94 deletions

View File

@ -9,6 +9,7 @@ members = [
"eth2/utils/boolean-bitfield",
"eth2/utils/hashing",
"eth2/utils/honey-badger-split",
"eth2/utils/merkle_proof",
"eth2/utils/int_to_bytes",
"eth2/utils/slot_clock",
"eth2/utils/ssz",

View File

@ -65,6 +65,7 @@ pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
pub slot_clock: U,
pub attestation_aggregator: RwLock<AttestationAggregator>,
pub deposits_for_inclusion: RwLock<Vec<Deposit>>,
pub exits_for_inclusion: RwLock<Vec<Exit>>,
pub proposer_slashings_for_inclusion: RwLock<Vec<ProposerSlashing>>,
pub attester_slashings_for_inclusion: RwLock<Vec<AttesterSlashing>>,
canonical_head: RwLock<CheckPoint>,
@ -134,6 +135,7 @@ where
slot_clock,
attestation_aggregator,
deposits_for_inclusion: RwLock::new(vec![]),
exits_for_inclusion: RwLock::new(vec![]),
proposer_slashings_for_inclusion: RwLock::new(vec![]),
attester_slashings_for_inclusion: RwLock::new(vec![]),
state: RwLock::new(genesis_state),
@ -370,13 +372,17 @@ where
/// Accept some deposit and queue it for inclusion in an appropriate block.
pub fn receive_deposit_for_inclusion(&self, deposit: Deposit) {
// TODO: deposits are not check for validity; check them.
// TODO: deposits are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.deposits_for_inclusion.write().push(deposit);
}
/// Return a vec of deposits suitable for inclusion in some block.
pub fn get_deposits_for_block(&self) -> Vec<Deposit> {
// TODO: deposits are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.deposits_for_inclusion.read().clone()
}
@ -386,6 +392,8 @@ where
/// This ensures that `Deposits` are not included twice in successive blocks.
pub fn set_deposits_as_included(&self, included_deposits: &[Deposit]) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_deposits {
@ -402,9 +410,49 @@ where
}
}
/// Accept some exit and queue it for inclusion in an appropriate block.
pub fn receive_exit_for_inclusion(&self, exit: Exit) {
// TODO: exits are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.exits_for_inclusion.write().push(exit);
}
/// Return a vec of exits suitable for inclusion in some block.
pub fn get_exits_for_block(&self) -> Vec<Exit> {
// TODO: exits are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.exits_for_inclusion.read().clone()
}
/// Takes a list of `Exits` that were included in recent blocks and removes them from the
/// inclusion queue.
///
/// This ensures that `Exits` are not included twice in successive blocks.
pub fn set_exits_as_included(&self, included_exits: &[Exit]) {
// TODO: method does not take forks into account; consider this.
let mut indices_to_delete = vec![];
for included in included_exits {
for (i, for_inclusion) in self.exits_for_inclusion.read().iter().enumerate() {
if included == for_inclusion {
indices_to_delete.push(i);
}
}
}
let exits_for_inclusion = &mut self.exits_for_inclusion.write();
for i in indices_to_delete {
exits_for_inclusion.remove(i);
}
}
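Removing queued exits by ascending index shifts the remaining elements, so once more than one match has been removed the later indices can point at the wrong entry. A minimal, shift-safe sketch of the same removal using `Vec::retain`, assuming it sits in the same impl block (the method name is illustrative; `Exit: PartialEq` is already relied on by the equality check above):
pub fn set_exits_as_included_retained(&self, included_exits: &[Exit]) {
    // Drop any queued exit that also appears in `included_exits` in a single
    // pass; `retain` never invalidates the indices of the surviving elements.
    self.exits_for_inclusion
        .write()
        .retain(|queued| !included_exits.contains(queued));
}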
/// Accept some proposer slashing and queue it for inclusion in an appropriate block.
pub fn receive_proposer_slashing_for_inclusion(&self, proposer_slashing: ProposerSlashing) {
// TODO: proposer_slashings are not check for validity; check them.
// TODO: proposer_slashings are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.proposer_slashings_for_inclusion
.write()
.push(proposer_slashing);
@ -413,6 +461,8 @@ where
/// Return a vec of proposer slashings suitable for inclusion in some block.
pub fn get_proposer_slashings_for_block(&self) -> Vec<ProposerSlashing> {
// TODO: proposer_slashings are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.proposer_slashings_for_inclusion.read().clone()
}
@ -425,6 +475,8 @@ where
included_proposer_slashings: &[ProposerSlashing],
) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_proposer_slashings {
@ -448,7 +500,9 @@ where
/// Accept some attester slashing and queue it for inclusion in an appropriate block.
pub fn receive_attester_slashing_for_inclusion(&self, attester_slashing: AttesterSlashing) {
// TODO: attester_slashings are not check for validity; check them.
// TODO: attester_slashings are not checked for validity; check them.
//
// https://github.com/sigp/lighthouse/issues/276
self.attester_slashings_for_inclusion
.write()
.push(attester_slashing);
@ -457,6 +511,8 @@ where
/// Return a vec of attester slashings suitable for inclusion in some block.
pub fn get_attester_slashings_for_block(&self) -> Vec<AttesterSlashing> {
// TODO: attester_slashings are indiscriminately included; check them for validity.
//
// https://github.com/sigp/lighthouse/issues/275
self.attester_slashings_for_inclusion.read().clone()
}
@ -469,6 +525,8 @@ where
included_attester_slashings: &[AttesterSlashing],
) {
// TODO: method does not take forks into account; consider this.
//
// https://github.com/sigp/lighthouse/issues/275
let mut indices_to_delete = vec![];
for included in included_attester_slashings {
@ -678,7 +736,7 @@ where
attester_slashings: self.get_attester_slashings_for_block(),
attestations,
deposits: self.get_deposits_for_block(),
exits: vec![],
exits: self.get_exits_for_block(),
},
};

View File

@ -18,6 +18,10 @@ test_cases:
amount: 32
- slot: 5
amount: 32
exits:
# At slot 10, submit an exit for validator #50.
- slot: 10
validator_index: 50
proposer_slashings:
# At slot 2, trigger a proposer slashing for validator #42.
- slot: 2
@ -39,4 +43,5 @@ test_cases:
num_validators: 1003
slashed_validators: [11, 12, 13, 14, 42]
exited_validators: []
exit_initiated_validators: [50]

View File

@ -280,6 +280,15 @@ impl BeaconChainHarness {
}
}
/// Submit an exit to the `BeaconChain` for inclusion in some block.
///
/// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
/// will stop receiving duties from the beacon chain and just do nothing when prompted to
/// produce/attest.
pub fn add_exit(&mut self, exit: Exit) {
self.beacon_chain.receive_exit_for_inclusion(exit);
}
/// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
self.beacon_chain

View File

@ -4,6 +4,7 @@
use crate::beacon_chain_harness::BeaconChainHarness;
use beacon_chain::CheckPoint;
use log::{info, warn};
use ssz::TreeHash;
use types::*;
use types::{
attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder,
@ -121,6 +122,20 @@ impl TestCase {
}
}
// Feed exits to the BeaconChain.
if let Some(ref exits) = self.config.exits {
for (slot, validator_index) in exits {
if *slot == slot_height {
info!(
"Including exit at slot height {} for validator {}.",
slot_height, validator_index
);
let exit = build_exit(&harness, *validator_index);
harness.add_exit(exit);
}
}
}
// Build a block or skip a slot.
match self.config.skip_slots {
Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
@ -185,6 +200,33 @@ impl TestCase {
}
}
fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> Exit {
let epoch = harness
.beacon_chain
.state
.read()
.current_epoch(&harness.spec);
let mut exit = Exit {
epoch,
validator_index,
signature: Signature::empty_signature(),
};
let message = exit.hash_tree_root();
exit.signature = harness
.validator_sign(
validator_index as usize,
&message[..],
epoch,
harness.spec.domain_exit,
)
.expect("Unable to sign Exit");
exit
}
/// Builds an `AttesterSlashing` for some `validator_indices`.
///
/// Signs the message using a `BeaconChainHarness`.

View File

@ -3,9 +3,13 @@ use bls::create_proof_of_possession;
use types::*;
use yaml_rust::Yaml;
pub type DepositTuple = (u64, Deposit, Keypair);
pub type ProposerSlashingTuple = (u64, u64);
pub type AttesterSlashingTuple = (u64, Vec<u64>);
pub type ValidatorIndex = u64;
pub type ValidatorIndices = Vec<u64>;
pub type DepositTuple = (SlotHeight, Deposit, Keypair);
pub type ExitTuple = (SlotHeight, ValidatorIndex);
pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
/// Defines the execution of a `BeaconStateHarness` across a series of slots.
#[derive(Debug)]
@ -24,6 +28,8 @@ pub struct Config {
pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
/// Attester slashings to be included during execution.
pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
/// Exits to be included during execution.
pub exits: Option<Vec<ExitTuple>>,
}
impl Config {
@ -40,10 +46,26 @@ impl Config {
deposits: parse_deposits(&yaml),
proposer_slashings: parse_proposer_slashings(&yaml),
attester_slashings: parse_attester_slashings(&yaml),
exits: parse_exits(&yaml),
}
}
}
/// Parse the `exits` section of the YAML document.
fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
let mut tuples = vec![];
for exit in yaml["exits"].as_vec()? {
let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
let validator_index =
as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");
tuples.push((SlotHeight::from(slot), validator_index));
}
Some(tuples)
}
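For reference, a sketch of the mapping performed by `parse_exits`, using the `exits` entry from the test-case YAML earlier in this commit (placeholder values):
// exits:
//   - slot: 10
//     validator_index: 50
//
// parses to the tuple:
let expected: ExitTuple = (SlotHeight::from(10_u64), 50);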
/// Parse the `attester_slashings` section of the YAML document.
fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
let mut slashings = vec![];
@ -53,7 +75,7 @@ fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
let validator_indices = as_vec_u64(slashing, "validator_indices")
.expect("Incomplete attester_slashing (validator_indices)");
slashings.push((slot, validator_indices));
slashings.push((SlotHeight::from(slot), validator_indices));
}
Some(slashings)
@ -68,7 +90,7 @@ fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
let validator_index = as_u64(slashing, "validator_index")
.expect("Incomplete proposer slashing (validator_index)");
slashings.push((slot, validator_index));
slashings.push((SlotHeight::from(slot), validator_index));
}
Some(slashings)
@ -102,7 +124,7 @@ fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
},
};
deposits.push((slot, deposit, keypair));
deposits.push((SlotHeight::from(slot), deposit, keypair));
}
Some(deposits)

View File

@ -13,8 +13,10 @@ pub struct StateCheck {
pub num_validators: Option<usize>,
/// A list of validator indices which have been penalized. Must be in ascending order.
pub slashed_validators: Option<Vec<u64>>,
/// A list of validator indices which have been exited. Must be in ascending order.
/// A list of validator indices which have been fully exited. Must be in ascending order.
pub exited_validators: Option<Vec<u64>>,
/// A list of validator indices which have had an exit initiated. Must be in ascending order.
pub exit_initiated_validators: Option<Vec<u64>>,
}
impl StateCheck {
@ -27,6 +29,7 @@ impl StateCheck {
num_validators: as_usize(&yaml, "num_validators"),
slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
exited_validators: as_vec_u64(&yaml, "exited_validators"),
exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
}
}
@ -40,6 +43,7 @@ impl StateCheck {
info!("Running state check for slot height {}.", self.slot);
// Check the state slot.
assert_eq!(
self.slot,
state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
@ -55,6 +59,7 @@ impl StateCheck {
info!("OK: num_validators = {}.", num_validators);
}
// Check for slashed validators.
if let Some(ref slashed_validators) = self.slashed_validators {
let actually_slashed_validators: Vec<u64> = state
.validator_registry
@ -75,6 +80,7 @@ impl StateCheck {
info!("OK: slashed_validators = {:?}.", slashed_validators);
}
// Check for exited validators.
if let Some(ref exited_validators) = self.exited_validators {
let actually_exited_validators: Vec<u64> = state
.validator_registry
@ -94,5 +100,29 @@ impl StateCheck {
);
info!("OK: exited_validators = {:?}.", exited_validators);
}
// Check for validators that have initiated exit.
if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
let actual: Vec<u64> = state
.validator_registry
.iter()
.enumerate()
.filter_map(|(i, validator)| {
if validator.has_initiated_exit() {
Some(i as u64)
} else {
None
}
})
.collect();
assert_eq!(
actual, *exit_initiated_validators,
"Exit initiated validators != expected."
);
info!(
"OK: exit_initiated_validators = {:?}.",
exit_initiated_validators
);
}
}
}

View File

@ -134,9 +134,9 @@ mod tests {
let store = BeaconBlockStore::new(db.clone());
let ssz = "definitly not a valid block".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(
store.block_at_slot(hash, Slot::from(42_u64)),
Err(BeaconBlockAtSlotError::DBError(
@ -151,10 +151,10 @@ mod tests {
let store = BeaconBlockStore::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(
store.block_at_slot(other_hash, Slot::from(42_u64)),
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash))
@ -169,18 +169,15 @@ mod tests {
let thread_count = 10;
let write_count = 10;
// We're expecting the product of these numbers to fit in one byte.
assert!(thread_count * write_count <= 255);
let mut handles = vec![];
for t in 0..thread_count {
let wc = write_count;
let bs = bs.clone();
let handle = thread::spawn(move || {
for w in 0..wc {
let key = (t * w) as u8;
let key = t * w;
let val = 42;
bs.put(&[key][..].into(), &vec![val]).unwrap();
bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap();
}
});
handles.push(handle);
@ -192,9 +189,9 @@ mod tests {
for t in 0..thread_count {
for w in 0..write_count {
let key = (t * w) as u8;
assert!(bs.exists(&[key][..].into()).unwrap());
let val = bs.get(&[key][..].into()).unwrap().unwrap();
let key = t * w;
assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap());
let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap();
assert_eq!(vec![42], val);
}
}
@ -208,19 +205,20 @@ mod tests {
// Specify test block parameters.
let hashes = [
Hash256::from(&[0; 32][..]),
Hash256::from(&[1; 32][..]),
Hash256::from(&[2; 32][..]),
Hash256::from(&[3; 32][..]),
Hash256::from(&[4; 32][..]),
Hash256::from([0; 32]),
Hash256::from([1; 32]),
Hash256::from([2; 32]),
Hash256::from([3; 32]),
Hash256::from([4; 32]),
];
let parent_hashes = [
Hash256::from(&[255; 32][..]), // Genesis block.
Hash256::from(&[0; 32][..]),
Hash256::from(&[1; 32][..]),
Hash256::from(&[2; 32][..]),
Hash256::from(&[3; 32][..]),
Hash256::from([255; 32]), // Genesis block.
Hash256::from([0; 32]),
Hash256::from([1; 32]),
Hash256::from([2; 32]),
Hash256::from([3; 32]),
];
let unknown_hash = Hash256::from([101; 32]); // different from all above
let slots: Vec<Slot> = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect();
// Generate a vec of random blocks and store them in the DB.
@ -233,7 +231,7 @@ mod tests {
block.slot = slots[i];
let ssz = ssz_encode(&block);
db.put(DB_COLUMN, &hashes[i], &ssz).unwrap();
db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap();
blocks.push(block);
}
@ -255,11 +253,10 @@ mod tests {
let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap();
assert_eq!(ssz, None);
let bad_hash = &Hash256::from("unknown".as_bytes());
let ssz = bs.block_at_slot(bad_hash, Slot::new(2));
let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2));
assert_eq!(
ssz,
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash))
Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash))
);
}
}

View File

@ -2,25 +2,25 @@ macro_rules! impl_crud_for_store {
($store: ident, $db_column: expr) => {
impl<T: ClientDB> $store<T> {
pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> {
self.db.put($db_column, hash, ssz)
self.db.put($db_column, hash.as_bytes(), ssz)
}
pub fn get(&self, hash: &Hash256) -> Result<Option<Vec<u8>>, DBError> {
self.db.get($db_column, hash)
self.db.get($db_column, hash.as_bytes())
}
pub fn exists(&self, hash: &Hash256) -> Result<bool, DBError> {
self.db.exists($db_column, hash)
self.db.exists($db_column, hash.as_bytes())
}
pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> {
self.db.delete($db_column, hash)
self.db.delete($db_column, hash.as_bytes())
}
}
};
}
#[allow(unused_macros)]
#[cfg(test)]
macro_rules! test_crud_for_store {
($store: ident, $db_column: expr) => {
#[test]
@ -29,10 +29,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
store.put(hash, ssz).unwrap();
assert_eq!(db.get(DB_COLUMN, hash).unwrap().unwrap(), ssz);
assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz);
}
#[test]
@ -41,9 +41,9 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert_eq!(store.get(hash).unwrap().unwrap(), ssz);
}
@ -53,10 +53,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, other_hash, ssz).unwrap();
db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap();
assert_eq!(store.get(hash).unwrap(), None);
}
@ -66,9 +66,9 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(store.exists(hash).unwrap());
}
@ -78,10 +78,10 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let other_hash = &Hash256::from("another hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
let other_hash = &Hash256::from([0xBB; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(!store.exists(other_hash).unwrap());
}
@ -91,13 +91,13 @@ macro_rules! test_crud_for_store {
let store = $store::new(db.clone());
let ssz = "some bytes".as_bytes();
let hash = &Hash256::from("some hash".as_bytes());
let hash = &Hash256::from([0xAA; 32]);
db.put(DB_COLUMN, hash, ssz).unwrap();
assert!(db.exists(DB_COLUMN, hash).unwrap());
db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap();
assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
store.delete(hash).unwrap();
assert!(!db.exists(DB_COLUMN, hash).unwrap());
assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap());
}
};
}

View File

@ -37,7 +37,7 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
store.put_block_hash(hash).unwrap();
assert!(db.exists(DB_COLUMN, hash).unwrap());
@ -48,7 +48,7 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
db.put(DB_COLUMN, hash, &[0]).unwrap();
assert!(store.block_hash_exists(hash).unwrap());
@ -59,8 +59,8 @@ mod tests {
let db = Arc::new(MemoryDB::open());
let store = PoWChainStore::new(db.clone());
let hash = &Hash256::from("some hash".as_bytes()).to_vec();
let other_hash = &Hash256::from("another hash".as_bytes()).to_vec();
let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec();
db.put(DB_COLUMN, hash, &[0]).unwrap();
assert!(!store.block_hash_exists(other_hash).unwrap());

View File

@ -67,7 +67,7 @@ into individual crates wherever possible.
Generally, tests can be kept in the same file, as is typical in Rust.
Integration tests should be placed in the `tests` directory in the crate's
root. Particularity large (line-count) tests should be placed into a separate
root. Particularly large (line-count) tests should be placed into a separate
file.
A function is not considered complete until a test exists for it. We produce

View File

@ -122,7 +122,7 @@ project.
* **Module**: A collection of items: functions, structs, traits, and even other
modules. Modules allow you to hierarchically split code into logical units
and manage visibility.
* **Attribute**: Metadaata applied to some module, crate or item.
* **Attribute**: Metadata applied to some module, crate or item.
* **Macros**: Macros are powerful meta-programming statements that get expanded
into source code that gets compiled with the rest of the code (Unlike `C`
macros that are pre-processed, Rust macros form an Abstract Syntax Tree).
@ -185,7 +185,7 @@ check your code.
| Function / Method | ``snake_case`` |
| Macro Names | ``snake_case`` |
| Constants | ``SCREAMING_SNAKE_CASE`` |
| Forbidden name | Trialing Underscore: ``name_`` |
| Forbidden name | Trailing Underscore: ``name_`` |
Other general rust docs:

View File

@ -210,7 +210,7 @@ where
trace!("Child vote length: {}", votes.len());
for (candidate, votes) in votes.iter() {
let candidate_bit: BitVec = BitVec::from_bytes(&candidate);
let candidate_bit: BitVec = BitVec::from_bytes(candidate.as_bytes());
// if the bitmasks don't match, exclude candidate
if !bitmask.iter().eq(candidate_bit.iter().take(bit)) {

View File

@ -0,0 +1,455 @@
use self::verify_slashable_attestation::verify_slashable_attestation;
use crate::SlotProcessingError;
use hashing::hash;
use int_to_bytes::int_to_bytes32;
use log::{debug, trace};
use ssz::{ssz_encode, TreeHash};
use types::*;
mod verify_slashable_attestation;
const PHASE_0_CUSTODY_BIT: bool = false;
#[derive(Debug, PartialEq)]
pub enum Error {
DBError(String),
StateAlreadyTransitioned,
PresentSlotIsNone,
UnableToDecodeBlock,
MissingParentState(Hash256),
InvalidParentState(Hash256),
MissingBeaconBlock(Hash256),
InvalidBeaconBlock(Hash256),
MissingParentBlock(Hash256),
StateSlotMismatch,
BadBlockSignature,
BadRandaoSignature,
MaxProposerSlashingsExceeded,
BadProposerSlashing,
MaxAttesterSlashingsExceed,
MaxAttestationsExceeded,
BadAttesterSlashing,
InvalidAttestation(AttestationValidationError),
NoBlockRoot,
MaxDepositsExceeded,
BadDeposit,
MaxExitsExceeded,
BadExit,
BadCustodyReseeds,
BadCustodyChallenges,
BadCustodyResponses,
BeaconStateError(BeaconStateError),
SlotProcessingError(SlotProcessingError),
}
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
IncludedTooEarly,
IncludedTooLate,
WrongJustifiedSlot,
WrongJustifiedRoot,
BadLatestCrosslinkRoot,
BadSignature,
ShardBlockRootNotZero,
NoBlockRoot,
BeaconStateError(BeaconStateError),
}
macro_rules! ensure {
($condition: expr, $result: expr) => {
if !$condition {
return Err($result);
}
};
}
pub trait BlockProcessable {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>;
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error>;
}
impl BlockProcessable for BeaconState {
fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, true, spec)
}
fn per_block_processing_without_verifying_block_signature(
&mut self,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(self, block, false, spec)
}
}
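A minimal usage sketch of the trait above, assuming the helper lives in the same module (so `Error` is in scope) and that `state`, `block` and `spec` are supplied by the caller:
fn apply_block(state: &mut BeaconState, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> {
    // Verifies the proposer signature as part of processing; the
    // `_without_verifying_block_signature` variant is the one to use while a
    // block is still being produced and has no signature attached.
    state.per_block_processing(block, spec)
}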
fn per_block_processing_signature_optional(
mut state: &mut BeaconState,
block: &BeaconBlock,
verify_block_signature: bool,
spec: &ChainSpec,
) -> Result<(), Error> {
ensure!(block.slot == state.slot, Error::StateSlotMismatch);
// Building the previous epoch cache could be delayed until an attestation from a previous epoch is
// included. This is left for future optimisation.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
/*
* Proposer Signature
*/
let block_proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
let block_proposer = &state.validator_registry[block_proposer_index];
if verify_block_signature {
ensure!(
bls_verify(
&block_proposer.pubkey,
&block.proposal_root(spec)[..],
&block.signature,
get_domain(&state.fork, state.current_epoch(spec), spec.domain_proposal)
),
Error::BadBlockSignature
);
}
/*
* RANDAO
*/
ensure!(
bls_verify(
&block_proposer.pubkey,
&int_to_bytes32(state.current_epoch(spec).as_u64()),
&block.randao_reveal,
get_domain(&state.fork, state.current_epoch(spec), spec.domain_randao)
),
Error::BadRandaoSignature
);
// TODO: check this is correct.
let new_mix = {
let mut mix = state.latest_randao_mixes
[state.slot.as_usize() % spec.latest_randao_mixes_length]
.as_bytes()
.to_vec();
mix.append(&mut ssz_encode(&block.randao_reveal));
Hash256::from_slice(&hash(&mix)[..])
};
state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix;
/*
* Eth1 data
*/
// TODO: Eth1 data processing.
/*
* Proposer slashings
*/
ensure!(
block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
Error::MaxProposerSlashingsExceeded
);
for proposer_slashing in &block.body.proposer_slashings {
let proposer = state
.validator_registry
.get(proposer_slashing.proposer_index as usize)
.ok_or(Error::BadProposerSlashing)?;
ensure!(
proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard,
Error::BadProposerSlashing
);
ensure!(
proposer_slashing.proposal_data_1.block_root
!= proposer_slashing.proposal_data_2.block_root,
Error::BadProposerSlashing
);
ensure!(
proposer.penalized_epoch > state.current_epoch(spec),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_1.hash_tree_root(),
&proposer_slashing.proposal_signature_1,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_1
.slot
.epoch(spec.epoch_length),
spec.domain_proposal
)
),
Error::BadProposerSlashing
);
ensure!(
bls_verify(
&proposer.pubkey,
&proposer_slashing.proposal_data_2.hash_tree_root(),
&proposer_slashing.proposal_signature_2,
get_domain(
&state.fork,
proposer_slashing
.proposal_data_2
.slot
.epoch(spec.epoch_length),
spec.domain_proposal
)
),
Error::BadProposerSlashing
);
state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?;
}
/*
* Attester slashings
*/
ensure!(
block.body.attester_slashings.len() as u64 <= spec.max_attester_slashings,
Error::MaxAttesterSlashingsExceed
);
for attester_slashing in &block.body.attester_slashings {
verify_slashable_attestation(&mut state, &attester_slashing, spec)?;
}
/*
* Attestations
*/
ensure!(
block.body.attestations.len() as u64 <= spec.max_attestations,
Error::MaxAttestationsExceeded
);
debug!("Verifying {} attestations.", block.body.attestations.len());
for attestation in &block.body.attestations {
validate_attestation(&state, attestation, spec)?;
let pending_attestation = PendingAttestation {
data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot,
};
state.latest_attestations.push(pending_attestation);
}
/*
* Deposits
*/
ensure!(
block.body.deposits.len() as u64 <= spec.max_deposits,
Error::MaxDepositsExceeded
);
// TODO: verify deposit merkle branches.
for deposit in &block.body.deposits {
debug!(
"Processing deposit for pubkey {:?}",
deposit.deposit_data.deposit_input.pubkey
);
state
.process_deposit(
deposit.deposit_data.deposit_input.pubkey.clone(),
deposit.deposit_data.amount,
deposit
.deposit_data
.deposit_input
.proof_of_possession
.clone(),
deposit.deposit_data.deposit_input.withdrawal_credentials,
None,
spec,
)
.map_err(|_| Error::BadDeposit)?;
}
/*
* Exits
*/
ensure!(
block.body.exits.len() as u64 <= spec.max_exits,
Error::MaxExitsExceeded
);
for exit in &block.body.exits {
let validator = state
.validator_registry
.get(exit.validator_index as usize)
.ok_or(Error::BadExit)?;
ensure!(
validator.exit_epoch
> state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec),
Error::BadExit
);
ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit);
let exit_message = {
let exit_struct = Exit {
epoch: exit.epoch,
validator_index: exit.validator_index,
signature: spec.empty_signature.clone(),
};
exit_struct.hash_tree_root()
};
ensure!(
bls_verify(
&validator.pubkey,
&exit_message,
&exit.signature,
get_domain(&state.fork, exit.epoch, spec.domain_exit)
),
Error::BadExit
);
state.initiate_validator_exit(exit.validator_index as usize);
}
debug!("State transition complete.");
Ok(())
}
pub fn validate_attestation(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, true)
}
pub fn validate_attestation_without_signature(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
) -> Result<(), AttestationValidationError> {
validate_attestation_signature_optional(state, attestation, spec, false)
}
fn validate_attestation_signature_optional(
state: &BeaconState,
attestation: &Attestation,
spec: &ChainSpec,
verify_signature: bool,
) -> Result<(), AttestationValidationError> {
trace!(
"validate_attestation_signature_optional: attestation epoch: {}",
attestation.data.slot.epoch(spec.epoch_length)
);
ensure!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
AttestationValidationError::IncludedTooEarly
);
ensure!(
attestation.data.slot + spec.epoch_length >= state.slot,
AttestationValidationError::IncludedTooLate
);
if attestation.data.slot >= state.current_epoch_start_slot(spec) {
ensure!(
attestation.data.justified_epoch == state.justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
} else {
ensure!(
attestation.data.justified_epoch == state.previous_justified_epoch,
AttestationValidationError::WrongJustifiedSlot
);
}
ensure!(
attestation.data.justified_block_root
== *state
.get_block_root(
attestation
.data
.justified_epoch
.start_slot(spec.epoch_length),
&spec
)
.ok_or(AttestationValidationError::NoBlockRoot)?,
AttestationValidationError::WrongJustifiedRoot
);
let potential_crosslink = Crosslink {
shard_block_root: attestation.data.shard_block_root,
epoch: attestation.data.slot.epoch(spec.epoch_length),
};
ensure!(
(attestation.data.latest_crosslink
== state.latest_crosslinks[attestation.data.shard as usize])
| (attestation.data.latest_crosslink == potential_crosslink),
AttestationValidationError::BadLatestCrosslinkRoot
);
if verify_signature {
let participants = state.get_attestation_participants(
&attestation.data,
&attestation.aggregation_bitfield,
spec,
)?;
trace!(
"slot: {}, shard: {}, participants: {:?}",
attestation.data.slot,
attestation.data.shard,
participants
);
let mut group_public_key = AggregatePublicKey::new();
for participant in participants {
group_public_key.add(&state.validator_registry[participant as usize].pubkey)
}
ensure!(
attestation.verify_signature(
&group_public_key,
PHASE_0_CUSTODY_BIT,
get_domain(
&state.fork,
attestation.data.slot.epoch(spec.epoch_length),
spec.domain_attestation,
)
),
AttestationValidationError::BadSignature
);
}
ensure!(
attestation.data.shard_block_root == spec.zero_hash,
AttestationValidationError::ShardBlockRootNotZero
);
Ok(())
}
fn get_domain(fork: &Fork, epoch: Epoch, domain_type: u64) -> u64 {
fork.get_domain(epoch, domain_type)
}
fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, domain: u64) -> bool {
signature.verify(message, domain, pubkey)
}
impl From<AttestationValidationError> for Error {
fn from(e: AttestationValidationError) -> Error {
Error::InvalidAttestation(e)
}
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<SlotProcessingError> for Error {
fn from(e: SlotProcessingError) -> Error {
Error::SlotProcessingError(e)
}
}
impl From<BeaconStateError> for AttestationValidationError {
fn from(e: BeaconStateError) -> AttestationValidationError {
AttestationValidationError::BeaconStateError(e)
}
}

View File

@ -0,0 +1,723 @@
use integer_sqrt::IntegerSquareRoot;
use log::{debug, trace};
use rayon::prelude::*;
use ssz::TreeHash;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use types::{
validator_registry::get_active_validator_indices, BeaconState, BeaconStateError, ChainSpec,
Crosslink, Epoch, Hash256, InclusionError, PendingAttestation, RelativeEpoch,
};
mod tests;
macro_rules! safe_add_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_add($b);
};
}
macro_rules! safe_sub_assign {
($a: expr, $b: expr) => {
$a = $a.saturating_sub($b);
};
}
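Both macros lean on saturating arithmetic so balance updates clamp at the numeric bounds instead of wrapping or panicking; a small illustration:
let mut balance: u64 = 5;
safe_sub_assign!(balance, 10); // saturates at zero rather than underflowing
assert_eq!(balance, 0);
safe_add_assign!(balance, u64::max_value()); // saturates at u64::MAX
assert_eq!(balance, u64::max_value());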
#[derive(Debug, PartialEq)]
pub enum Error {
UnableToDetermineProducer,
NoBlockRoots,
BaseRewardQuotientIsZero,
NoRandaoSeed,
BeaconStateError(BeaconStateError),
InclusionError(InclusionError),
WinningRootError(WinningRootError),
}
#[derive(Debug, PartialEq)]
pub enum WinningRootError {
NoWinningRoot,
BeaconStateError(BeaconStateError),
}
#[derive(Clone)]
pub struct WinningRoot {
pub shard_block_root: Hash256,
pub attesting_validator_indices: Vec<usize>,
pub total_balance: u64,
pub total_attesting_balance: u64,
}
pub trait EpochProcessable {
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>;
}
impl EpochProcessable for BeaconState {
// Cyclomatic complexity is ignored. It would be ideal to split this function apart, however it
// remains monolithic to allow for easier spec updates. Once the spec is more stable we can
// optimise.
#[allow(clippy::cyclomatic_complexity)]
fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = self.current_epoch(spec);
let previous_epoch = self.previous_epoch(spec);
let next_epoch = self.next_epoch(spec);
debug!(
"Starting per-epoch processing on epoch {}...",
self.current_epoch(spec)
);
// Ensure all of the caches are built.
self.build_epoch_cache(RelativeEpoch::Previous, spec)?;
self.build_epoch_cache(RelativeEpoch::Current, spec)?;
self.build_epoch_cache(RelativeEpoch::Next, spec)?;
/*
* Validators attesting during the current epoch.
*/
let active_validator_indices = get_active_validator_indices(
&self.validator_registry,
self.slot.epoch(spec.epoch_length),
);
let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec);
trace!(
"{} validators with a total balance of {} wei.",
active_validator_indices.len(),
current_total_balance
);
let current_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.current_epoch(spec)
})
.collect();
trace!(
"Current epoch attestations: {}",
current_epoch_attestations.len()
);
let current_epoch_boundary_attestations: Vec<&PendingAttestation> =
current_epoch_attestations
.par_iter()
.filter(
|a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) {
Some(block_root) => {
(a.data.epoch_boundary_root == *block_root)
&& (a.data.justified_epoch == self.justified_epoch)
}
None => unreachable!(),
},
)
.cloned()
.collect();
let current_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&current_epoch_boundary_attestations[..], spec)?;
let current_epoch_boundary_attesting_balance =
self.get_total_balance(&current_epoch_boundary_attester_indices[..], spec);
trace!(
"Current epoch boundary attesters: {}",
current_epoch_boundary_attester_indices.len()
);
/*
* Validators attesting during the previous epoch
*/
/*
* Validators that made an attestation during the previous epoch
*/
let previous_epoch_attestations: Vec<&PendingAttestation> = self
.latest_attestations
.par_iter()
.filter(|a| {
//TODO: ensure these saturating subs are correct.
(a.data.slot / spec.epoch_length).epoch(spec.epoch_length)
== self.previous_epoch(spec)
})
.collect();
debug!(
"previous epoch attestations: {}",
previous_epoch_attestations.len()
);
let previous_epoch_attester_indices =
self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?;
let previous_total_balance = self.get_total_balance(
&get_active_validator_indices(&self.validator_registry, previous_epoch),
spec,
);
/*
* Validators targeting the previous justified epoch
*/
let previous_epoch_justified_attestations: Vec<&PendingAttestation> = {
let mut a: Vec<&PendingAttestation> = current_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
let mut b: Vec<&PendingAttestation> = previous_epoch_attestations
.iter()
.filter(|a| a.data.justified_epoch == self.previous_justified_epoch)
.cloned()
.collect();
a.append(&mut b);
a
};
let previous_epoch_justified_attester_indices = self
.get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?;
let previous_epoch_justified_attesting_balance =
self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec);
/*
* Validators justifying the epoch boundary block at the start of the previous epoch
*/
let previous_epoch_boundary_attestations: Vec<&PendingAttestation> =
previous_epoch_justified_attestations
.iter()
.filter(
|a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) {
Some(block_root) => a.data.epoch_boundary_root == *block_root,
None => unreachable!(),
},
)
.cloned()
.collect();
let previous_epoch_boundary_attester_indices = self
.get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?;
let previous_epoch_boundary_attesting_balance =
self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec);
/*
* Validators attesting to the expected beacon chain head during the previous epoch.
*/
let previous_epoch_head_attestations: Vec<&PendingAttestation> =
previous_epoch_attestations
.iter()
.filter(|a| match self.get_block_root(a.data.slot, spec) {
Some(block_root) => a.data.beacon_block_root == *block_root,
None => unreachable!(),
})
.cloned()
.collect();
let previous_epoch_head_attester_indices =
self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?;
let previous_epoch_head_attesting_balance =
self.get_total_balance(&previous_epoch_head_attester_indices[..], spec);
debug!(
"previous_epoch_head_attester_balance of {} wei.",
previous_epoch_head_attesting_balance
);
/*
* Eth1 Data
*/
if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 {
for eth1_data_vote in &self.eth1_data_votes {
if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period {
self.latest_eth1_data = eth1_data_vote.eth1_data.clone();
}
}
self.eth1_data_votes = vec![];
}
/*
* Justification
*/
let mut new_justified_epoch = self.justified_epoch;
self.justification_bitfield <<= 1;
// If > 2/3 of the total balance attested to the previous epoch boundary
//
// - Set the 2nd bit of the bitfield.
// - Set the previous epoch to be justified.
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 2;
new_justified_epoch = previous_epoch;
trace!(">= 2/3 voted for previous epoch boundary");
}
// If > 2/3 of the total balance attested to the current epoch boundary
//
// - Set the 1st bit of the bitfield.
// - Set the current epoch to be justified.
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
self.justification_bitfield |= 1;
new_justified_epoch = current_epoch;
trace!(">= 2/3 voted for current epoch boundary");
}
// If:
//
// - All three epochs prior to this epoch have been justified.
// - The previous justified epoch was three epochs ago.
//
// Then, set the finalized epoch to be three epochs ago.
if ((self.justification_bitfield >> 1) % 8 == 0b111)
& (self.previous_justified_epoch == previous_epoch - 2)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 3 was finalized (1st condition).");
}
// If:
//
// - Both two epochs prior to this epoch have been justified.
// - The previous justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if ((self.justification_bitfield >> 1) % 4 == 0b11)
& (self.previous_justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.previous_justified_epoch;
trace!("epoch - 2 was finalized (2nd condition).");
}
// If:
//
// - This epoch and the two prior have been justified.
// - The presently justified epoch was two epochs ago.
//
// Then, set the finalized epoch to two epochs ago.
if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1)
{
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 2 was finalized (3rd condition).");
}
// If:
//
// - This epoch and the epoch prior to it have been justified.
// - The presently justified epoch was the previous epoch.
//
// Then, set the finalized epoch to be the previous epoch.
if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) {
self.finalized_epoch = self.justified_epoch;
trace!("epoch - 1 was finalized (4th condition).");
}
self.previous_justified_epoch = self.justified_epoch;
self.justified_epoch = new_justified_epoch;
debug!(
"Finalized epoch {}, justified epoch {}.",
self.finalized_epoch, self.justified_epoch
);
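The `3 * attesting >= 2 * total` comparisons above are the integer form of the `attesting / total >= 2/3` supermajority rule; a worked example with made-up balances:
// Hypothetical totals: 96 units staked, 65 attesting to the epoch boundary.
let current_total_balance: u64 = 96;
let boundary_attesting_balance: u64 = 65;
// 3 * 65 = 195 >= 2 * 96 = 192, so the threshold is met and the
// corresponding justification bit is set.
assert!(3 * boundary_attesting_balance >= 2 * current_total_balance);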
/*
* Crosslinks
*/
// Cached for later lookups.
let mut winning_root_for_shards: HashMap<u64, Result<WinningRoot, WinningRootError>> =
HashMap::new();
// for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
trace!(
"Finding winning root for slot: {} (epoch: {})",
slot,
slot.epoch(spec.epoch_length)
);
// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
let winning_root = winning_root(
self,
shard,
&current_epoch_attestations,
&previous_epoch_attestations,
spec,
);
if let Ok(winning_root) = &winning_root {
let total_committee_balance =
self.get_total_balance(&crosslink_committee[..], spec);
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
self.latest_crosslinks[shard as usize] = Crosslink {
epoch: current_epoch,
shard_block_root: winning_root.shard_block_root,
}
}
}
winning_root_for_shards.insert(shard, winning_root);
}
}
trace!(
"Found {} winning shard roots.",
winning_root_for_shards.len()
);
/*
* Rewards and Penalties
*/
let base_reward_quotient =
previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero);
}
/*
* Justification and finalization
*/
let epochs_since_finality = next_epoch - self.finalized_epoch;
let previous_epoch_justified_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned());
let previous_epoch_boundary_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned());
let previous_epoch_head_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned());
let previous_epoch_attester_indices_hashset: HashSet<usize> =
HashSet::from_iter(previous_epoch_attester_indices.iter().cloned());
let active_validator_indices_hashset: HashSet<usize> =
HashSet::from_iter(active_validator_indices.iter().cloned());
debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len());
debug!("{} epochs since finality.", epochs_since_finality);
if epochs_since_finality <= 4 {
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_justified_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_boundary_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
if previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * previous_epoch_head_attesting_balance
/ previous_total_balance
);
} else if active_validator_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_add_assign!(
self.validator_balances[index],
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
)
}
} else {
for index in 0..self.validator_balances.len() {
let inactivity_penalty = self.inactivity_penalty(
index,
epochs_since_finality,
base_reward_quotient,
spec,
);
if active_validator_indices_hashset.contains(&index) {
if !previous_epoch_justified_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_boundary_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if !previous_epoch_head_attester_indices_hashset.contains(&index) {
safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
}
if self.validator_registry[index].penalized_epoch <= current_epoch {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(
self.validator_balances[index],
2 * inactivity_penalty + base_reward
);
}
}
}
for index in previous_epoch_attester_indices {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
safe_sub_assign!(
self.validator_balances[index],
base_reward
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
);
}
}
trace!("Processed validator justification and finalization rewards/penalities.");
/*
* Attestation inclusion
*/
for &index in &previous_epoch_attester_indices_hashset {
let inclusion_slot =
self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?;
let proposer_index = self
.get_beacon_proposer_index(inclusion_slot, spec)
.map_err(|_| Error::UnableToDetermineProducer)?;
let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[proposer_index],
base_reward / spec.includer_reward_quotient
);
}
trace!(
"Previous epoch attesters: {}.",
previous_epoch_attester_indices_hashset.len()
);
/*
* Crosslinks
*/
for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
// Clone is used to remove the borrow. It becomes an issue later when trying to mutate
// `self.balances`.
let crosslink_committees_at_slot =
self.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (_crosslink_committee, shard) in crosslink_committees_at_slot {
let shard = shard as u64;
if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) {
// TODO: remove the map.
let attesting_validator_indices: HashSet<usize> = HashSet::from_iter(
winning_root.attesting_validator_indices.iter().cloned(),
);
for index in 0..self.validator_balances.len() {
let base_reward = self.base_reward(index, base_reward_quotient, spec);
if attesting_validator_indices.contains(&index) {
safe_add_assign!(
self.validator_balances[index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
} else {
safe_sub_assign!(self.validator_balances[index], base_reward);
}
}
for index in &winning_root.attesting_validator_indices {
let base_reward = self.base_reward(*index, base_reward_quotient, spec);
safe_add_assign!(
self.validator_balances[*index],
base_reward * winning_root.total_attesting_balance
/ winning_root.total_balance
);
}
}
}
}
/*
* Ejections
*/
self.process_ejections(spec);
/*
* Validator Registry
*/
self.previous_calculation_epoch = self.current_calculation_epoch;
self.previous_epoch_start_shard = self.current_epoch_start_shard;
debug!(
"setting previous_epoch_seed to : {}",
self.current_epoch_seed
);
self.previous_epoch_seed = self.current_epoch_seed;
let should_update_validator_registy = if self.finalized_epoch
> self.validator_registry_update_epoch
{
(0..self.get_current_epoch_committee_count(spec)).all(|i| {
let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count;
self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch
})
} else {
false
};
if should_update_validator_registy {
trace!("updating validator registry.");
self.update_validator_registry(spec);
self.current_calculation_epoch = next_epoch;
self.current_epoch_start_shard = (self.current_epoch_start_shard
+ self.get_current_epoch_committee_count(spec) as u64)
% spec.shard_count;
self.current_epoch_seed = self.generate_seed(self.current_calculation_epoch, spec)?
} else {
trace!("not updating validator registry.");
let epochs_since_last_registry_update =
current_epoch - self.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
self.current_calculation_epoch = next_epoch;
self.current_epoch_seed =
self.generate_seed(self.current_calculation_epoch, spec)?
}
}
self.process_penalties_and_exits(spec);
self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize)
% spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices(
&self.validator_registry,
next_epoch + Epoch::from(spec.entry_exit_delay),
));
self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] =
self.latest_penalized_balances
[current_epoch.as_usize() % spec.latest_penalized_exit_length];
self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self
.get_randao_mix(current_epoch, spec)
.and_then(|x| Some(*x))
.ok_or_else(|| Error::NoRandaoSeed)?;
self.latest_attestations = self
.latest_attestations
.iter()
.filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch)
.cloned()
.collect();
debug!("Epoch transition complete.");
Ok(())
}
}
fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
Hash256::from_slice(&input.hash_tree_root()[..])
}
fn winning_root(
state: &BeaconState,
shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
spec: &ChainSpec,
) -> Result<WinningRoot, WinningRootError> {
let mut attestations = current_epoch_attestations.to_vec();
attestations.append(&mut previous_epoch_attestations.to_vec());
let mut candidates: HashMap<Hash256, WinningRoot> = HashMap::new();
let mut highest_seen_balance = 0;
for a in &attestations {
if a.data.shard != shard {
continue;
}
let shard_block_root = &a.data.shard_block_root;
if candidates.contains_key(shard_block_root) {
continue;
}
let attesting_validator_indices = attestations
.iter()
.try_fold::<_, _, Result<_, BeaconStateError>>(vec![], |mut acc, a| {
if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
acc.append(&mut state.get_attestation_participants(
&a.data,
&a.aggregation_bitfield,
spec,
)?);
}
Ok(acc)
})?;
let total_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
let total_attesting_balance: u64 = attesting_validator_indices
.iter()
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
if total_attesting_balance > highest_seen_balance {
highest_seen_balance = total_attesting_balance;
}
let candidate_root = WinningRoot {
shard_block_root: *shard_block_root,
attesting_validator_indices,
total_attesting_balance,
total_balance,
};
candidates.insert(*shard_block_root, candidate_root);
}
Ok(candidates
.iter()
.filter_map(|(_hash, candidate)| {
if candidate.total_attesting_balance == highest_seen_balance {
Some(candidate)
} else {
None
}
})
.min_by_key(|candidate| candidate.shard_block_root)
.ok_or_else(|| WinningRootError::NoWinningRoot)?
// TODO: avoid clone.
.clone())
}
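When several candidate roots attract the same (highest) attesting balance, the `min_by_key` call above breaks the tie deterministically in favour of the smallest `shard_block_root` (Hash256 ordering is lexicographic over the raw bytes); a small illustration with placeholder roots:
// Two candidates tied on attesting balance: the numerically smaller root wins.
let tied = vec![Hash256::from_low_u64_le(2), Hash256::from_low_u64_le(1)];
assert_eq!(tied.iter().min(), Some(&Hash256::from_low_u64_le(1)));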
impl From<InclusionError> for Error {
fn from(e: InclusionError) -> Error {
Error::InclusionError(e)
}
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<BeaconStateError> for WinningRootError {
fn from(e: BeaconStateError) -> WinningRootError {
WinningRootError::BeaconStateError(e)
}
}

View File

@ -99,7 +99,7 @@ pub fn verify_block_signature(
let proposal = Proposal {
slot: block.slot,
shard: spec.beacon_chain_shard_number,
block_root: Hash256::from(&block.signed_root()[..]),
block_root: Hash256::from_slice(&block.signed_root()[..]),
signature: block.signature.clone(),
};
let domain = spec.get_domain(
@ -187,7 +187,7 @@ pub fn update_randao(
) -> Result<(), BeaconStateError> {
let hashed_reveal = {
let encoded_signature = ssz_encode(reveal);
Hash256::from(&hash(&encoded_signature[..])[..])
Hash256::from_slice(&hash(&encoded_signature[..])[..])
};
let current_epoch = state.slot.epoch(spec.slots_per_epoch);

View File

@ -84,7 +84,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result
.hash_tree_root();
state.latest_active_index_roots[(next_epoch.as_usize()
+ spec.activation_exit_delay as usize)
% spec.latest_active_index_roots_length] = Hash256::from(&active_tree_root[..]);
% spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]);
state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] =
state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length];

View File

@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
bls = { path = "../utils/bls" }
boolean-bitfield = { path = "../utils/boolean-bitfield" }
ethereum-types = "0.4.0"
ethereum-types = "0.5"
hashing = { path = "../utils/hashing" }
honey-badger-split = { path = "../utils/honey-badger-split" }
int_to_bytes = { path = "../utils/int_to_bytes" }
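Bumping `ethereum-types` to 0.5 drops the blanket `Hash256::from(&[u8])` conversion used previously; the replacements applied throughout this commit follow the pattern sketched below (values are placeholders):
// Fixed-size 32-byte arrays still convert directly.
let a = Hash256::from([0xAA; 32]);
// Arbitrary 32-byte slices (e.g. tree-hash output) go through `from_slice`.
let b = Hash256::from_slice(&[0xBB; 32][..]);
// Small integer test values use the low-u64 constructors.
let c = Hash256::from_low_u64_le(1);
// Raw bytes for database keys are obtained with `as_bytes`.
let key: &[u8] = a.as_bytes();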

View File

@ -27,8 +27,8 @@ impl AttesterSlashingBuilder {
let shard = 0;
let justified_epoch = Epoch::new(0);
let epoch = Epoch::new(0);
let hash_1 = Hash256::from(&[1][..]);
let hash_2 = Hash256::from(&[2][..]);
let hash_1 = Hash256::from_low_u64_le(1);
let hash_2 = Hash256::from_low_u64_le(2);
let mut slashable_attestation_1 = SlashableAttestation {
validator_indices: validator_indices.to_vec(),

View File

@ -469,18 +469,20 @@ impl BeaconState {
let mut input = self
.get_randao_mix(epoch - spec.min_seed_lookahead, spec)
.ok_or_else(|| Error::InsufficientRandaoMixes)?
.as_bytes()
.to_vec();
input.append(
&mut self
.get_active_index_root(epoch, spec)
.ok_or_else(|| Error::InsufficientIndexRoots)?
.as_bytes()
.to_vec(),
);
input.append(&mut int_to_bytes32(epoch.as_u64()));
Ok(Hash256::from(&hash(&input[..])[..]))
Ok(Hash256::from_slice(&hash(&input[..])[..]))
}
/// Returns the beacon proposer index for the `slot`.
@ -1155,7 +1157,7 @@ impl BeaconState {
}
fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 {
Hash256::from(&input.hash_tree_root()[..])
Hash256::from_slice(&input.hash_tree_root()[..])
}
impl Encodable for BeaconState {

View File

@ -145,8 +145,8 @@ impl BeaconStateBuilder {
state.previous_shuffling_epoch = epoch - 1;
state.current_shuffling_epoch = epoch;
state.previous_shuffling_seed = Hash256::from(&b"previous_seed"[..]);
state.current_shuffling_seed = Hash256::from(&b"current_seed"[..]);
state.previous_shuffling_seed = Hash256::from_low_u64_le(0);
state.current_shuffling_seed = Hash256::from_low_u64_le(1);
state.previous_justified_epoch = epoch - 2;
state.justified_epoch = epoch - 1;

View File

@ -0,0 +1,20 @@
use crate::test_utils::TestRandom;
use crate::{Hash256, Slot};
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ProposalSignedData {
pub slot: Slot,
pub shard: u64,
pub block_root: Hash256,
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(ProposalSignedData);
}

View File

@ -25,14 +25,14 @@ impl ProposerSlashingBuilder {
let mut proposal_1 = Proposal {
slot,
shard,
block_root: Hash256::from(&[1][..]),
block_root: Hash256::from_low_u64_le(1),
signature: Signature::empty_signature(),
};
let mut proposal_2 = Proposal {
slot,
shard,
block_root: Hash256::from(&[2][..]),
block_root: Hash256::from_low_u64_le(2),
signature: Signature::empty_signature(),
};

View File

@ -0,0 +1,132 @@
use super::AttestationData;
use crate::chain_spec::ChainSpec;
use crate::test_utils::TestRandom;
use bls::AggregateSignature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct SlashableVoteData {
pub custody_bit_0_indices: Vec<u32>,
pub custody_bit_1_indices: Vec<u32>,
pub data: AttestationData,
pub aggregate_signature: AggregateSignature,
}
impl SlashableVoteData {
/// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
///
/// Spec v0.3.0
pub fn is_double_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
self.data.slot.epoch(spec.epoch_length) == other.data.slot.epoch(spec.epoch_length)
}
/// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
///
/// Spec v0.3.0
pub fn is_surround_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
let source_epoch_1 = self.data.justified_epoch;
let source_epoch_2 = other.data.justified_epoch;
let target_epoch_1 = self.data.slot.epoch(spec.epoch_length);
let target_epoch_2 = other.data.slot.epoch(spec.epoch_length);
(source_epoch_1 < source_epoch_2) && (target_epoch_2 < target_epoch_1)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::chain_spec::ChainSpec;
use crate::slot_epoch::{Epoch, Slot};
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
#[test]
pub fn test_is_double_vote_true() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
assert_eq!(
slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
true
)
}
#[test]
pub fn test_is_double_vote_false() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(2, 1, &spec);
assert_eq!(
slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
false
);
}
#[test]
pub fn test_is_surround_vote_true() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(2, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
true
);
}
#[test]
pub fn test_is_surround_vote_true_realistic() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(4, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(3, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
true
);
}
#[test]
pub fn test_is_surround_vote_false_source_epoch_fails() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(2, 2, &spec);
let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
false
);
}
#[test]
pub fn test_is_surround_vote_false_target_epoch_fails() {
let spec = ChainSpec::foundation();
let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
let slashable_vote_second = create_slashable_vote_data(2, 2, &spec);
assert_eq!(
slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
false
);
}
ssz_tests!(SlashableVoteData);
fn create_slashable_vote_data(
slot_factor: u64,
justified_epoch: u64,
spec: &ChainSpec,
) -> SlashableVoteData {
let mut rng = XorShiftRng::from_seed([42; 16]);
let mut slashable_vote = SlashableVoteData::random_for_test(&mut rng);
slashable_vote.data.slot = Slot::new(slot_factor * spec.epoch_length);
slashable_vote.data.justified_epoch = Epoch::new(justified_epoch);
slashable_vote
}
}

View File

@ -6,6 +6,6 @@ impl<T: RngCore> TestRandom<T> for Address {
fn random_for_test(rng: &mut T) -> Self {
let mut key_bytes = vec![0; 20];
rng.fill_bytes(&mut key_bytes);
Address::from(&key_bytes[..])
Address::from_slice(&key_bytes[..])
}
}

View File

@ -6,6 +6,6 @@ impl<T: RngCore> TestRandom<T> for Hash256 {
fn random_for_test(rng: &mut T) -> Self {
let mut key_bytes = vec![0; 32];
rng.fill_bytes(&mut key_bytes);
Hash256::from(&key_bytes[..])
Hash256::from_slice(&key_bytes[..])
}
}

View File

@ -0,0 +1,34 @@
#[cfg(test)]
#[macro_export]
macro_rules! ssz_tests {
($type: ident) => {
#[test]
pub fn test_ssz_round_trip() {
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable};
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::TreeHash;
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
};
}

View File

@ -0,0 +1,36 @@
use crate::{test_utils::TestRandom, Hash256, Slot};
use bls::PublicKey;
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
// The information gathered from the PoW chain validator registration function.
#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ValidatorRegistryDeltaBlock {
pub latest_registry_delta_root: Hash256,
pub validator_index: u32,
pub pubkey: PublicKey,
pub slot: Slot,
pub flag: u64,
}
impl Default for ValidatorRegistryDeltaBlock {
/// Yields a "default" `Validator`. Primarily used for testing.
fn default() -> Self {
Self {
latest_registry_delta_root: Hash256::zero(),
validator_index: std::u32::MAX,
pubkey: PublicKey::default(),
slot: Slot::from(std::u64::MAX),
flag: std::u64::MAX,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(ValidatorRegistryDeltaBlock);
}

View File

@ -0,0 +1,9 @@
[package]
name = "merkle_proof"
version = "0.1.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = "2018"
[dependencies]
ethereum-types = "0.5"
hashing = { path = "../hashing" }

View File

@ -0,0 +1,148 @@
use ethereum_types::H256;
use hashing::hash;
/// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`.
///
/// The `branch` argument is the main component of the proof: it should be the list of sibling
/// hashes on the path from the leaf to the root, in bottom-up order.
pub fn verify_merkle_proof(
leaf: H256,
branch: &[H256],
depth: usize,
index: usize,
root: H256,
) -> bool {
if branch.len() == depth {
merkle_root_from_branch(leaf, branch, depth, index) == root
} else {
false
}
}
/// Compute a root hash from a leaf and a Merkle proof.
fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 {
assert_eq!(branch.len(), depth, "proof length should equal depth");
let mut merkle_root = leaf.as_bytes().to_vec();
for i in 0..depth {
let ith_bit = (index >> i) & 0x01;
if ith_bit == 1 {
let input = concat(branch[i].as_bytes().to_vec(), merkle_root);
merkle_root = hash(&input);
} else {
let mut input = merkle_root;
input.extend_from_slice(branch[i].as_bytes());
merkle_root = hash(&input);
}
}
H256::from_slice(&merkle_root)
}
/// Concatenate two vectors.
fn concat(mut vec1: Vec<u8>, mut vec2: Vec<u8>) -> Vec<u8> {
vec1.append(&mut vec2);
vec1
}
#[cfg(test)]
mod tests {
use super::*;
fn hash_concat(h1: H256, h2: H256) -> H256 {
H256::from_slice(&hash(&concat(
h1.as_bytes().to_vec(),
h2.as_bytes().to_vec(),
)))
}
#[test]
fn verify_small_example() {
// Construct a small Merkle tree manually
let leaf_b00 = H256::from([0xAA; 32]);
let leaf_b01 = H256::from([0xBB; 32]);
let leaf_b10 = H256::from([0xCC; 32]);
let leaf_b11 = H256::from([0xDD; 32]);
let node_b0x = hash_concat(leaf_b00, leaf_b01);
let node_b1x = hash_concat(leaf_b10, leaf_b11);
let root = hash_concat(node_b0x, node_b1x);
// Run some proofs
assert!(verify_merkle_proof(
leaf_b00,
&[leaf_b01, node_b1x],
2,
0b00,
root
));
assert!(verify_merkle_proof(
leaf_b01,
&[leaf_b00, node_b1x],
2,
0b01,
root
));
assert!(verify_merkle_proof(
leaf_b10,
&[leaf_b11, node_b0x],
2,
0b10,
root
));
assert!(verify_merkle_proof(
leaf_b11,
&[leaf_b10, node_b0x],
2,
0b11,
root
));
assert!(verify_merkle_proof(
leaf_b11,
&[leaf_b10],
1,
0b11,
node_b1x
));
// Ensure that incorrect proofs fail
// Zero-length proof
assert!(!verify_merkle_proof(leaf_b01, &[], 2, 0b01, root));
// Proof in reverse order
assert!(!verify_merkle_proof(
leaf_b01,
&[node_b1x, leaf_b00],
2,
0b01,
root
));
// Proof too short
assert!(!verify_merkle_proof(leaf_b01, &[leaf_b00], 2, 0b01, root));
// Wrong index
assert!(!verify_merkle_proof(
leaf_b01,
&[leaf_b00, node_b1x],
2,
0b10,
root
));
// Wrong root
assert!(!verify_merkle_proof(
leaf_b01,
&[leaf_b00, node_b1x],
2,
0b01,
node_b1x
));
}
#[test]
fn verify_zero_depth() {
let leaf = H256::from([0xD6; 32]);
let junk = H256::from([0xD7; 32]);
assert!(verify_merkle_proof(leaf, &[], 0, 0, leaf));
assert!(!verify_merkle_proof(leaf, &[], 0, 7, junk));
}
}
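The crate above only verifies a branch it is handed. For context, here is a prover-side sketch (not part of the diff) showing how such a branch could be built from the bottom layer of leaves, assuming exactly 2^depth leaves and reusing the `hash` and `concat` helpers defined above:

fn merkle_branch(leaves: &[H256], depth: usize, index: usize) -> Vec<H256> {
    assert_eq!(leaves.len(), 1 << depth, "leaf count must be 2^depth");
    let mut layer: Vec<H256> = leaves.to_vec();
    let mut branch = Vec::with_capacity(depth);
    let mut i = index;
    for _ in 0..depth {
        // The sibling of the current node is the next proof element (bottom-up order).
        branch.push(layer[i ^ 1]);
        // Hash adjacent pairs to form the next layer towards the root.
        layer = layer
            .chunks(2)
            .map(|pair| {
                H256::from_slice(&hash(&concat(
                    pair[0].as_bytes().to_vec(),
                    pair[1].as_bytes().to_vec(),
                )))
            })
            .collect();
        i /= 2;
    }
    branch
}

Feeding the result into `verify_merkle_proof(leaves[index], &branch, depth, index, root)` should return `true` when `root` is the hash left in the final one-element layer.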

View File

@ -6,5 +6,5 @@ edition = "2018"
[dependencies]
bytes = "0.4.9"
ethereum-types = "0.4.0"
ethereum-types = "0.5"
hashing = { path = "../hashing" }

View File

@ -59,7 +59,7 @@ impl Decodable for H256 {
if bytes.len() < 32 || bytes.len() - 32 < index {
Err(DecodeError::TooShort)
} else {
Ok((H256::from(&bytes[index..(index + 32)]), index + 32))
Ok((H256::from_slice(&bytes[index..(index + 32)]), index + 32))
}
}
}
@ -69,7 +69,7 @@ impl Decodable for Address {
if bytes.len() < 20 || bytes.len() - 20 < index {
Err(DecodeError::TooShort)
} else {
Ok((Address::from(&bytes[index..(index + 20)]), index + 20))
Ok((Address::from_slice(&bytes[index..(index + 20)]), index + 20))
}
}
}
@ -95,7 +95,7 @@ mod tests {
*/
let input = vec![42_u8; 32];
let (decoded, i) = H256::ssz_decode(&input, 0).unwrap();
assert_eq!(decoded.to_vec(), input);
assert_eq!(decoded.as_bytes(), &input[..]);
assert_eq!(i, 32);
/*
@ -104,7 +104,7 @@ mod tests {
let mut input = vec![42_u8; 32];
input.push(12);
let (decoded, i) = H256::ssz_decode(&input, 0).unwrap();
assert_eq!(decoded.to_vec()[..], input[0..32]);
assert_eq!(decoded.as_bytes(), &input[0..32]);
assert_eq!(i, 32);
/*

View File

@ -55,13 +55,13 @@ impl Encodable for bool {
impl Encodable for H256 {
fn ssz_append(&self, s: &mut SszStream) {
s.append_encoded_raw(&self.to_vec());
s.append_encoded_raw(self.as_bytes());
}
}
impl Encodable for Address {
fn ssz_append(&self, s: &mut SszStream) {
s.append_encoded_raw(&self.to_vec());
s.append_encoded_raw(self.as_bytes());
}
}

View File

@ -1,11 +1,13 @@
use std::fs;
use std::path::PathBuf;
use types::ChainSpec;
/// Stores the core configuration for this validator instance.
#[derive(Clone)]
pub struct ClientConfig {
pub data_dir: PathBuf,
pub server: String,
pub spec: ChainSpec,
}
const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse-validators";
@ -20,6 +22,11 @@ impl ClientConfig {
fs::create_dir_all(&data_dir)
.unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
let server = "localhost:50051".to_string();
Self { data_dir, server }
let spec = ChainSpec::foundation();
Self {
data_dir,
server,
spec,
}
}
}
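A minimal illustration of how the new `spec` field is intended to be consumed by callers, mirroring what the `--spec` CLI flag added below does (illustrative only):

let mut config = ClientConfig::default();
// Swap the default foundation spec for the smaller test configuration.
config.spec = ChainSpec::few_validators();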

View File

@ -43,6 +43,16 @@ fn main() {
.help("Address to connect to BeaconNode.")
.takes_value(true),
)
.arg(
Arg::with_name("spec")
.long("spec")
.value_name("spec")
.short("s")
.help("Configuration of Beacon Chain")
.takes_value(true)
.possible_values(&["foundation", "few_validators"])
.default_value("foundation"),
)
.get_matches();
let mut config = ClientConfig::default();
@ -62,6 +72,17 @@ fn main() {
}
}
// TODO: Permit loading a custom spec from file.
// Custom spec
if let Some(spec_str) = matches.value_of("spec") {
match spec_str {
"foundation" => config.spec = ChainSpec::foundation(),
"few_validators" => config.spec = ChainSpec::few_validators(),
// Should be impossible due to clap's `possible_values(..)` function.
_ => unreachable!(),
};
}
// Log configuration
info!(log, "";
"data_dir" => &config.data_dir.to_str(),
@ -81,11 +102,8 @@ fn main() {
Arc::new(ValidatorServiceClient::new(ch))
};
// Ethereum
//
// TODO: Permit loading a custom spec from file.
// https://github.com/sigp/lighthouse/issues/160
let spec = Arc::new(ChainSpec::foundation());
// Spec
let spec = Arc::new(config.spec.clone());
// Clock for determining the present slot.
// TODO: this shouldn't be a static time, instead it should be pulled from the beacon node.