Fix clippy warnings (#813)
* Clippy account manager
* Clippy account_manager
* Clippy beacon_node/beacon_chain
* Clippy beacon_node/client
* Clippy beacon_node/eth1
* Clippy beacon_node/eth2-libp2p
* Clippy beacon_node/genesis
* Clippy beacon_node/network
* Clippy beacon_node/rest_api
* Clippy beacon_node/src
* Clippy beacon_node/store
* Clippy eth2/lmd_ghost
* Clippy eth2/operation_pool
* Clippy eth2/state_processing
* Clippy eth2/types
* Clippy eth2/utils/bls
* Clippy eth2/utils/cached_tree_hash
* Clippy eth2/utils/deposit_contract
* Clippy eth2/utils/eth2_interop_keypairs
* Clippy eth2/utils/eth2_testnet_config
* Clippy eth2/utils/lighthouse_metrics
* Clippy eth2/utils/ssz
* Clippy eth2/utils/ssz_types
* Clippy eth2/utils/tree_hash_derive
* Clippy lcli
* Clippy tests/beacon_chain_sim
* Clippy validator_client
* Cargo fmt
parent 1abb964652
commit 7396cd2cab
@@ -156,7 +156,7 @@ fn run_new_validator_subcommand<T: EthSpec>(
                 })
                 .map(|password| {
                     // Trim the line feed from the end of the password file, if present.
-                    if password.ends_with("\n") {
+                    if password.ends_with('\n') {
                         password[0..password.len() - 1].to_string()
                     } else {
                         password
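The change above addresses clippy's `single_char_pattern` lint: a single-character string pattern can be a `char` literal, which avoids string-pattern overhead. A self-contained sketch of the same shape — the helper name is hypothetical, not code from this commit:

```rust
// Minimal sketch of the trailing-newline trim above, using the
// `char` pattern form that clippy prefers.
fn trim_trailing_newline(password: String) -> String {
    if password.ends_with('\n') {
        password[0..password.len() - 1].to_string()
    } else {
        password
    }
}

fn main() {
    assert_eq!(trim_trailing_newline("secret\n".to_string()), "secret");
    assert_eq!(trim_trailing_newline("secret".to_string()), "secret");
}
```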
@@ -337,7 +337,7 @@ fn deposit_validators<E: EthSpec>(
                 .map(|_| event_loop)
         })
         // Web3 gives errors if the event loop is dropped whilst performing requests.
-        .map(|event_loop| drop(event_loop))
+        .map(drop)
}

 /// For the given `ValidatorDirectory`, submit a deposit transaction to the `web3` node.
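`.map(|event_loop| drop(event_loop))` becoming `.map(drop)` is clippy's `redundant_closure` lint: a closure that only forwards its argument to a function can be replaced by the function itself. An illustrative sketch:

```rust
fn main() {
    let event_loops = vec![(), (), ()];
    // Before the fix this was `.map(|x| drop(x))`; passing `drop`
    // directly is equivalent and avoids the wrapper closure.
    let dropped: Vec<()> = event_loops.into_iter().map(drop).collect();
    assert_eq!(dropped.len(), 3);
}
```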
@@ -367,7 +367,7 @@ fn deposit_validator(
         .into_future()
         .and_then(move |(voting_keypair, deposit_data)| {
             let pubkey_1 = voting_keypair.pk.clone();
-            let pubkey_2 = voting_keypair.pk.clone();
+            let pubkey_2 = voting_keypair.pk;

             let web3_1 = web3.clone();
             let web3_2 = web3.clone();
@@ -421,7 +421,7 @@ fn deposit_validator(
                 to: Some(deposit_contract),
                 gas: Some(U256::from(DEPOSIT_GAS)),
                 gas_price: None,
-                value: Some(U256::from(from_gwei(deposit_amount))),
+                value: Some(from_gwei(deposit_amount)),
                 data: Some(deposit_data.into()),
                 nonce: None,
                 condition: None,
@@ -25,6 +25,7 @@ use state_processing::{
     per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
 };
 use std::borrow::Cow;
+use std::cmp::Ordering;
 use std::fs;
 use std::io::prelude::*;
 use std::sync::Arc;
@@ -512,9 +513,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn state_at_slot(&self, slot: Slot) -> Result<BeaconState<T::EthSpec>, Error> {
         let head_state = self.head()?.beacon_state;

-        if slot == head_state.slot {
-            Ok(head_state)
-        } else if slot > head_state.slot {
+        match slot.cmp(&head_state.slot) {
+            Ordering::Equal => Ok(head_state),
+            Ordering::Greater => {
                 if slot > head_state.slot + T::EthSpec::slots_per_epoch() {
                     warn!(
                         self.log,
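An `if/else if/else` chain that compares the same two values twice is what clippy's `comparison_chain` lint flags; the fix matches once on `Ord::cmp`, which is why `use std::cmp::Ordering;` is added in the same file. A minimal sketch of the rewrite:

```rust
use std::cmp::Ordering;

// One `cmp` call replaces the `==` / `>` / `else` chain, mirroring
// the `state_at_slot` change above.
fn classify(slot: u64, head_slot: u64) -> &'static str {
    match slot.cmp(&head_slot) {
        Ordering::Equal => "at head",
        Ordering::Greater => "ahead of head",
        Ordering::Less => "behind head",
    }
}

fn main() {
    assert_eq!(classify(5, 5), "at head");
    assert_eq!(classify(9, 5), "ahead of head");
    assert_eq!(classify(1, 5), "behind head");
}
```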
@@ -560,7 +561,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     };
                 }
                 Ok(state)
-        } else {
+            }
+            Ordering::Less => {
                 let state_root = self
                     .rev_iter_state_roots()?
                     .take_while(|(_root, current_slot)| *current_slot >= slot)
@@ -573,6 +575,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     .ok_or_else(|| Error::NoStateForSlot(slot))?)
             }
         }
+    }

     /// Returns the `BeaconState` the current slot (viz., `self.slot()`).
     ///
@@ -638,7 +641,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let head_state = &self.head()?.beacon_state;

         let mut state = if epoch(slot) == epoch(head_state.slot) {
-            self.head()?.beacon_state.clone()
+            self.head()?.beacon_state
         } else {
             self.state_at_slot(slot)?
         };
@@ -671,7 +674,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let head_state = &self.head()?.beacon_state;

         let mut state = if epoch == as_epoch(head_state.slot) {
-            self.head()?.beacon_state.clone()
+            self.head()?.beacon_state
         } else {
             self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))?
         };
@@ -1754,9 +1757,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let mut dump = vec![];

         let mut last_slot = CheckPoint {
-            beacon_block: self.head()?.beacon_block.clone(),
+            beacon_block: self.head()?.beacon_block,
             beacon_block_root: self.head()?.beacon_block_root,
-            beacon_state: self.head()?.beacon_state.clone(),
+            beacon_state: self.head()?.beacon_state,
             beacon_state_root: self.head()?.beacon_state_root,
         };

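The dropped `.clone()` calls here look like clippy's `redundant_clone` lint: each `self.head()?` appears to yield an owned snapshot, so cloning a field of a value that is immediately dropped is wasted work. A sketch under that assumption — `Snapshot` and `head()` are stand-ins, not this codebase's types:

```rust
#[derive(Clone)]
struct Snapshot {
    beacon_block: Vec<u8>,
    beacon_state: Vec<u8>,
}

// Stand-in for `self.head()?`, returning an owned snapshot each call.
fn head() -> Snapshot {
    Snapshot {
        beacon_block: vec![1],
        beacon_state: vec![2],
    }
}

fn main() {
    // Before: `head().beacon_block.clone()` cloned a field of a value
    // that was about to be dropped anyway; moving the field out is free.
    let block = head().beacon_block;
    let state = head().beacon_state;
    assert_eq!((block[0], state[0]), (1, 2));
}
```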
@@ -448,7 +448,7 @@ where
         let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain {
             ForkChoice::from_ssz_container(
                 persisted_beacon_chain.fork_choice.clone(),
-                store.clone(),
+                store,
                 block_root_tree,
             )
             .map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))?
@@ -462,7 +462,7 @@ where
                 .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?;

             let backend = ThreadSafeReducedTree::new(
-                store.clone(),
+                store,
                 block_root_tree,
                 &finalized_checkpoint.beacon_block,
                 finalized_checkpoint.beacon_block_root,
@@ -626,7 +626,7 @@ mod test {
     #[test]
     fn recent_genesis() {
         let validator_count = 8;
-        let genesis_time = 13371337;
+        let genesis_time = 13_371_337;

         let log = get_logger();
         let store = Arc::new(MemoryStore::open());
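`13371337` becoming `13_371_337` is clippy's `unreadable_literal` lint; the underscores are purely visual and do not change the value:

```rust
fn main() {
    // Digit separators make long literals scannable; the value is identical.
    let genesis_time: u64 = 13_371_337;
    assert_eq!(genesis_time.to_string(), "13371337");
}
```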
@@ -641,7 +641,7 @@ mod test {

         let chain = BeaconChainBuilder::new(MinimalEthSpec)
             .logger(log.clone())
-            .store(store.clone())
+            .store(store)
             .store_migrator(NullMigrator)
             .genesis_state(genesis_state)
             .expect("should build state using recent genesis")
@@ -662,7 +662,7 @@ mod test {

         assert_eq!(state.slot, Slot::new(0), "should start from genesis");
         assert_eq!(
-            state.genesis_time, 13371337,
+            state.genesis_time, 13_371_337,
             "should have the correct genesis time"
         );
         assert_eq!(
@@ -8,6 +8,7 @@ use rand::prelude::*;
 use slog::{crit, debug, error, trace, Logger};
 use ssz_derive::{Decode, Encode};
 use state_processing::per_block_processing::get_new_eth1_data;
+use std::cmp::Ordering;
 use std::collections::HashMap;
 use std::iter::DoubleEndedIterator;
 use std::iter::FromIterator;
@@ -343,8 +344,7 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
             .iter()
             .rev()
             .skip_while(|eth1_block| eth1_block.timestamp > voting_period_start_seconds)
-            .skip(eth1_follow_distance as usize)
-            .next()
+            .nth(eth1_follow_distance as usize)
             .map(|block| {
                 trace!(
                     self.log,
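`.skip(n).next()` collapsing to `.nth(n)` is clippy's `iter_skip_next` lint — the two spellings are equivalent. A small sketch:

```rust
fn main() {
    let timestamps = [100u64, 200, 300, 400, 500];
    let follow_distance = 2;
    // `.skip(follow_distance).next()` and `.nth(follow_distance)` pick
    // the same element; clippy prefers the single call.
    let chosen = timestamps.iter().rev().nth(follow_distance);
    assert_eq!(chosen, Some(&300));
}
```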
@@ -392,11 +392,10 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
             state.eth1_data.deposit_count
         };

-        if deposit_index > deposit_count {
-            Err(Error::DepositIndexTooHigh)
-        } else if deposit_index == deposit_count {
-            Ok(vec![])
-        } else {
+        match deposit_index.cmp(&deposit_count) {
+            Ordering::Greater => Err(Error::DepositIndexTooHigh),
+            Ordering::Equal => Ok(vec![]),
+            Ordering::Less => {
                 let next = deposit_index;
                 let last = std::cmp::min(deposit_count, next + T::MaxDeposits::to_u64());

@@ -409,6 +408,7 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
                     .map(|(_deposit_root, deposits)| deposits)
             }
         }
+    }

     /// Return encoded byte representation of the block and deposit caches.
     fn as_bytes(&self) -> Vec<u8> {
@@ -775,7 +775,7 @@ mod test {

         let deposits_for_inclusion = eth1_chain
             .deposits_for_block_inclusion(&state, &random_eth1_data(), spec)
-            .expect(&format!("should find deposit for {}", i));
+            .unwrap_or_else(|_| panic!("should find deposit for {}", i));

         let expected_len =
             std::cmp::min(i - initial_deposit_index, max_deposits as usize);
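`.expect(&format!(...))` builds and allocates the panic message even when the value is present; clippy's `expect_fun_call` lint suggests the lazy `unwrap_or_else` + `panic!` form used here. Illustration:

```rust
fn main() {
    let deposit: Option<u64> = Some(42);
    let i = 7;
    // The closure only runs on the failure path, so the message is
    // never formatted when the deposit exists.
    let value = deposit.unwrap_or_else(|| panic!("should find deposit for {}", i));
    assert_eq!(value, 42);
}
```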
@@ -853,7 +853,7 @@ mod test {
         state.slot = Slot::new(period * 1_000 + period / 2);

         // Add 50% of the votes so a lookup is required.
-        for _ in 0..period / 2 + 1 {
+        for _ in 0..=period / 2 {
             state
                 .eth1_data_votes
                 .push(random_eth1_data())
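`0..period / 2 + 1` becoming `0..=period / 2` is clippy's `range_plus_one` lint; both ranges visit the same number of items:

```rust
fn main() {
    let period = 10u64;
    let mut votes = 0;
    // Inclusive range: same iteration count as `0..period / 2 + 1`.
    for _ in 0..=period / 2 {
        votes += 1;
    }
    assert_eq!(votes, period / 2 + 1);
}
```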
@@ -932,7 +932,7 @@ mod test {
         state.slot = Slot::new(period / 2);

         // Add 50% of the votes so a lookup is required.
-        for _ in 0..period / 2 + 1 {
+        for _ in 0..=period / 2 {
             state
                 .eth1_data_votes
                 .push(random_eth1_data())
@@ -1090,7 +1090,7 @@ mod test {
             eth1_block.number,
             *new_eth1_data
                 .get(&eth1_block.clone().eth1_data().unwrap())
-                .expect(&format!(
+                .unwrap_or_else(|| panic!(
                     "new_eth1_data should have expected block #{}",
                     eth1_block.number
                 ))
@@ -1135,8 +1135,8 @@ mod test {

         let votes = collect_valid_votes(
             &state,
-            HashMap::from_iter(new_eth1_data.clone().into_iter()),
-            HashMap::from_iter(all_eth1_data.clone().into_iter()),
+            HashMap::from_iter(new_eth1_data.into_iter()),
+            HashMap::from_iter(all_eth1_data.into_iter()),
         );
         assert_eq!(
             votes.len(),
@@ -1164,7 +1164,7 @@ mod test {
         let votes = collect_valid_votes(
             &state,
             HashMap::from_iter(new_eth1_data.clone().into_iter()),
-            HashMap::from_iter(all_eth1_data.clone().into_iter()),
+            HashMap::from_iter(all_eth1_data.into_iter()),
         );
         assert_votes!(
             votes,
@@ -1196,8 +1196,8 @@ mod test {

         let votes = collect_valid_votes(
             &state,
-            HashMap::from_iter(new_eth1_data.clone().into_iter()),
-            HashMap::from_iter(all_eth1_data.clone().into_iter()),
+            HashMap::from_iter(new_eth1_data.into_iter()),
+            HashMap::from_iter(all_eth1_data.into_iter()),
         );
         assert_votes!(
             votes,
@@ -1230,12 +1230,12 @@ mod test {
             .expect("should have some eth1 data")
             .clone();

-        state.eth1_data_votes = vec![non_new_eth1_data.0.clone()].into();
+        state.eth1_data_votes = vec![non_new_eth1_data.0].into();

         let votes = collect_valid_votes(
             &state,
-            HashMap::from_iter(new_eth1_data.clone().into_iter()),
-            HashMap::from_iter(all_eth1_data.clone().into_iter()),
+            HashMap::from_iter(new_eth1_data.into_iter()),
+            HashMap::from_iter(all_eth1_data.into_iter()),
         );

         assert_votes!(
@@ -1268,8 +1268,8 @@ mod test {

         let votes = collect_valid_votes(
             &state,
-            HashMap::from_iter(new_eth1_data.clone().into_iter()),
-            HashMap::from_iter(all_eth1_data.clone().into_iter()),
+            HashMap::from_iter(new_eth1_data.into_iter()),
+            HashMap::from_iter(all_eth1_data.into_iter()),
         );

         assert_votes!(
@@ -294,7 +294,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
     /// Returns a `SszForkChoice` which contains the current state of `Self`.
     pub fn as_ssz_container(&self) -> SszForkChoice {
         SszForkChoice {
-            genesis_block_root: self.genesis_block_root.clone(),
+            genesis_block_root: self.genesis_block_root,
             justified_checkpoint: self.justified_checkpoint.read().clone(),
             best_justified_checkpoint: self.best_justified_checkpoint.read().clone(),
             backend_bytes: self.backend.as_bytes(),
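Removing `.clone()` on `genesis_block_root` looks like clippy's `clone_on_copy` lint — `Hash256` is a `Copy` type, so assignment already copies it. A sketch with a stand-in `Copy` type (not the real `Hash256`):

```rust
// With `Copy` types, the explicit `.clone()` is noise: a plain
// assignment performs the same bitwise copy.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Root([u8; 4]);

fn main() {
    let genesis_block_root = Root([0; 4]);
    let stored = genesis_block_root; // was: genesis_block_root.clone()
    assert_eq!(stored, genesis_block_root);
}
```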
@@ -59,10 +59,10 @@ impl HeadTracker {
         let slots_len = ssz_container.slots.len();

         if roots_len != slots_len {
-            return Err(Error::MismatchingLengths {
+            Err(Error::MismatchingLengths {
                 roots_len,
                 slots_len,
-            });
+            })
         } else {
             let map = HashMap::from_iter(
                 ssz_container
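Dropping `return` and the trailing semicolon when both branches are the function's tail expression is clippy's `needless_return` lint. Sketch:

```rust
fn check_lengths(roots_len: usize, slots_len: usize) -> Result<(), String> {
    // Both arms are the tail expression, so no `return` is needed.
    if roots_len != slots_len {
        Err(format!("mismatching lengths: {} vs {}", roots_len, slots_len))
    } else {
        Ok(())
    }
}

fn main() {
    assert!(check_lengths(3, 3).is_ok());
    assert!(check_lengths(3, 4).is_err());
}
```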
@@ -173,7 +173,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {

         let chain = BeaconChainBuilder::new(eth_spec_instance)
             .logger(log.clone())
-            .custom_spec(spec.clone())
+            .custom_spec(spec)
             .store(store.clone())
             .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(store))
             .resume_from_db(Eth1Config::default())
@@ -236,7 +236,6 @@ where
             self.chain
                 .state_at_slot(state_slot)
                 .expect("should find state for slot")
-                .clone()
         };

         // Determine the first slot where a block should be built.
@@ -151,7 +151,7 @@ where

         let builder = BeaconChainBuilder::new(eth_spec_instance)
             .logger(context.log.clone())
-            .store(store.clone())
+            .store(store)
             .store_migrator(store_migrator)
             .custom_spec(spec.clone());

@@ -223,7 +223,7 @@ where
                 Box::new(future)
             }
             ClientGenesis::RemoteNode { server, .. } => {
-                let future = Bootstrapper::connect(server.to_string(), &context.log)
+                let future = Bootstrapper::connect(server, &context.log)
                     .map_err(|e| {
                         format!("Failed to initialize bootstrap client: {}", e)
                     })
@@ -306,14 +306,14 @@ where
             .ok_or_else(|| "http_server requires a libp2p network sender")?;

         let network_info = rest_api::NetworkInfo {
-            network_service: network.clone(),
-            network_chan: network_send.clone(),
+            network_service: network,
+            network_chan: network_send,
         };

         let (exit_signal, listening_addr) = rest_api::start_server(
             &client_config.rest_api,
             &context.executor,
-            beacon_chain.clone(),
+            beacon_chain,
             network_info,
             client_config
                 .create_db_path()
@@ -529,7 +529,7 @@ where
             spec,
             context.log,
         )
-        .map_err(|e| format!("Unable to open database: {:?}", e).to_string())?;
+        .map_err(|e| format!("Unable to open database: {:?}", e))?;
         self.store = Some(Arc::new(store));
         Ok(self)
     }
@@ -557,8 +557,8 @@ where
     {
         /// Specifies that the `Client` should use a `DiskStore` database.
         pub fn simple_disk_store(mut self, path: &Path) -> Result<Self, String> {
-            let store = SimpleDiskStore::open(path)
-                .map_err(|e| format!("Unable to open database: {:?}", e).to_string())?;
+            let store =
+                SimpleDiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e))?;
             self.store = Some(Arc::new(store));
             Ok(self)
         }
@@ -660,7 +660,7 @@ where
             .ok_or_else(|| "caching_eth1_backend requires a store".to_string())?;

         let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
-            eth1_service_from_genesis.update_config(config.clone())?;
+            eth1_service_from_genesis.update_config(config)?;

             // This cache is not useful because it's first (earliest) block likely the block that
             // triggered genesis.
@@ -17,7 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1;
 const SECS_PER_MINUTE: f64 = 60.0;
 const SECS_PER_HOUR: f64 = 3600.0;
 const SECS_PER_DAY: f64 = 86400.0; // non-leap
-const SECS_PER_WEEK: f64 = 604800.0; // non-leap
+const SECS_PER_WEEK: f64 = 604_800.0; // non-leap
 const DAYS_PER_WEEK: f64 = 7.0;
 const HOURS_PER_DAY: f64 = 24.0;
 const MINUTES_PER_HOUR: f64 = 60.0;
@@ -166,13 +166,14 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
         .then(move |result| {
             match result {
                 Ok(()) => Ok(()),
-                Err(e) => Ok(error!(
+                Err(e) => {
+                    error!(
                     log_3,
                     "Notifier failed to notify";
                     "error" => format!("{:?}", e)
-                ))
+                    );
+                    Ok(())
+                }
             }
         });

         let (exit_signal, exit) = exit_future::signal();
         context
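`error!(...)` expands to `()`, so the old `Err(e) => Ok(error!(...))` passed a unit value straight into `Ok` — the shape clippy's `unit_arg` lint (presumably the trigger here) complains about. A sketch with `eprintln!` standing in for the logging macro:

```rust
fn notify() -> Result<(), String> {
    Err("boom".to_string())
}

fn main() {
    // Split the side effect from the returned value instead of
    // handing the macro's `()` result to `Ok(...)`.
    let result: Result<(), ()> = match notify() {
        Ok(()) => Ok(()),
        Err(e) => {
            eprintln!("Notifier failed to notify: {:?}", e);
            Ok(())
        }
    };
    assert!(result.is_ok());
}
```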
@@ -224,7 +224,7 @@ mod tests {
         );
     }

-    let mut cache_2 = cache.clone();
+    let mut cache_2 = cache;
     cache_2.truncate(17);
     assert_eq!(
         cache_2.blocks.len(),
@@ -1,6 +1,7 @@
 use crate::DepositLog;
 use eth2_hashing::hash;
 use ssz_derive::{Decode, Encode};
+use std::cmp::Ordering;
 use tree_hash::TreeHash;
 use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH};

@@ -196,24 +197,26 @@ impl DepositCache {
     /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty).
     /// - If a log with `log.index` is already known, but the given `log` is distinct to it.
     pub fn insert_log(&mut self, log: DepositLog) -> Result<(), Error> {
-        if log.index == self.logs.len() as u64 {
+        match log.index.cmp(&(self.logs.len() as u64)) {
+            Ordering::Equal => {
                 let deposit = Hash256::from_slice(&log.deposit_data.tree_hash_root());
                 self.leaves.push(deposit);
                 self.logs.push(log);
                 self.deposit_tree.push_leaf(deposit)?;
                 self.deposit_roots.push(self.deposit_tree.root());
                 Ok(())
-        } else if log.index < self.logs.len() as u64 {
+            }
+            Ordering::Less => {
                 if self.logs[log.index as usize] == log {
                     Ok(())
                 } else {
                     Err(Error::DuplicateDistinctLog(log.index))
                 }
-        } else {
-            Err(Error::NonConsecutive {
+            }
+            Ordering::Greater => Err(Error::NonConsecutive {
                 log_index: log.index,
                 expected: self.logs.len(),
-            })
+            }),
         }
     }

@@ -312,14 +315,12 @@ impl DepositCache {
             .logs
             .binary_search_by(|deposit| deposit.block_number.cmp(&block_number));
         match index {
-            Ok(index) => return self.logs.get(index).map(|x| x.index + 1),
-            Err(next) => {
-                return Some(
+            Ok(index) => self.logs.get(index).map(|x| x.index + 1),
+            Err(next) => Some(
                 self.logs
                     .get(next.saturating_sub(1))
                     .map_or(0, |x| x.index + 1),
-                )
-            }
+            ),
         }
     }

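For reference, `binary_search_by` returns `Ok(i)` on an exact match and `Err(i)` with the would-be insertion point otherwise, which is why the rewritten `match` above needs no early `return` in either arm:

```rust
fn main() {
    let block_numbers = [5u64, 10, 20, 40];
    // Exact hit: index of the matching element.
    assert_eq!(block_numbers.binary_search_by(|b| b.cmp(&20)), Ok(2));
    // Miss: the index where the probe would be inserted.
    assert_eq!(block_numbers.binary_search_by(|b| b.cmp(&15)), Err(2));
}
```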
@@ -329,7 +330,7 @@ impl DepositCache {
     /// and queries the `deposit_roots` map to get the corresponding `deposit_root`.
     pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option<Hash256> {
         let index = self.get_deposit_count_from_cache(block_number)?;
-        Some(self.deposit_roots.get(index as usize)?.clone())
+        Some(*self.deposit_roots.get(index as usize)?)
     }
 }

@@ -136,7 +136,7 @@ pub fn get_deposit_count(
         timeout,
     )
     .and_then(|result| match result {
-        None => Err(format!("Deposit root response was none")),
+        None => Err("Deposit root response was none".to_string()),
         Some(bytes) => {
             if bytes.is_empty() {
                 Ok(None)
@@ -173,7 +173,7 @@ pub fn get_deposit_root(
         timeout,
     )
     .and_then(|result| match result {
-        None => Err(format!("Deposit root response was none")),
+        None => Err("Deposit root response was none".to_string()),
         Some(bytes) => {
             if bytes.is_empty() {
                 Ok(None)
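`format!` with no interpolation is clippy's `useless_format` lint; `.to_string()` says the same thing without the formatting machinery:

```rust
fn main() {
    // Before: Err(format!("Deposit root response was none"))
    let err: Result<(), String> = Err("Deposit root response was none".to_string());
    assert!(err.is_err());
}
```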
@@ -613,7 +613,7 @@ impl Service {

             Ok(BlockCacheUpdateOutcome::Success {
                 blocks_imported,
-                head_block_number: cache_4.clone().block_cache.read().highest_block_number(),
+                head_block_number: cache_4.block_cache.read().highest_block_number(),
             })
         })
 }
@@ -758,7 +758,7 @@ mod fast {
             log,
         );
         let n = 10;
-        let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect();
+        let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
         for deposit in &deposits {
             deposit_contract
                 .deposit(runtime, deposit.clone())
@@ -57,7 +57,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
         net_conf: &NetworkConfig,
         log: &slog::Logger,
     ) -> error::Result<Self> {
-        let local_peer_id = local_key.public().clone().into_peer_id();
+        let local_peer_id = local_key.public().into_peer_id();
         let behaviour_log = log.new(o!());

         let ping_config = PingConfig::new()
@@ -74,7 +74,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {

         Ok(Behaviour {
             eth2_rpc: RPC::new(log.clone()),
-            gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()),
+            gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()),
             discovery: Discovery::new(local_key, net_conf, log)?,
             ping: Ping::new(ping_config),
             identify,
@@ -145,7 +145,7 @@ where
         // When terminating a stream, report the stream termination to the requesting user via
         // an RPC error
         let error = RPCErrorResponse::ServerError(ErrorMessage {
-            error_message: "Request timed out".as_bytes().to_vec(),
+            error_message: b"Request timed out".to_vec(),
         });

         // The stream termination type is irrelevant, this will terminate the
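`"...".as_bytes().to_vec()` round-trips through a `&str`; clippy's `string_lit_as_bytes` lint prefers a byte-string literal:

```rust
fn main() {
    // `b"..."` is a `&[u8; N]` directly, with no `str` detour.
    let error_message: Vec<u8> = b"Request timed out".to_vec();
    assert_eq!(error_message.len(), "Request timed out".len());
}
```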
@@ -510,7 +510,7 @@ where
                 // notify the user
                 return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
                     RPCEvent::Error(
-                        stream_id.get_ref().clone(),
+                        *stream_id.get_ref(),
                         RPCError::Custom("Stream timed out".into()),
                     ),
                 )));
@@ -43,8 +43,7 @@ pub fn build_libp2p_instance(
 ) -> LibP2PService {
     let config = build_config(port, boot_nodes, secret_key);
     // launch libp2p service
-    let libp2p_service = LibP2PService::new(config.clone(), log.clone()).unwrap();
-    libp2p_service
+    LibP2PService::new(config, log.clone()).unwrap()
 }

 #[allow(dead_code)]
@@ -64,10 +63,10 @@ pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option<u16>) ->
         .map(|x| get_enr(&x).multiaddr()[1].clone())
         .collect();

-    for i in 0..n {
-        for j in i..n {
+    for (i, node) in nodes.iter_mut().enumerate().take(n) {
+        for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
             if i != j {
-                match libp2p::Swarm::dial_addr(&mut nodes[i].swarm, multiaddrs[j].clone()) {
+                match libp2p::Swarm::dial_addr(&mut node.swarm, multiaddr.clone()) {
                     Ok(()) => debug!(log, "Connected"),
                     Err(_) => error!(log, "Failed to connect"),
                 };
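Indexing `nodes[i]` / `multiaddrs[j]` inside `for i in 0..n` loops is what clippy's `needless_range_loop` lint flags; `enumerate` keeps the indices while borrowing each element directly. A sketch of the same shape:

```rust
fn main() {
    let mut nodes = vec![0u32; 4];
    let multiaddrs = vec![10u32, 20, 30, 40];
    let n = nodes.len();
    // `enumerate` + `take`/`skip` reproduce the old `0..n` and `i..n`
    // index ranges without indexing into the collections.
    for (i, node) in nodes.iter_mut().enumerate().take(n) {
        for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
            if i != j {
                *node += multiaddr;
            }
        }
    }
    assert_eq!(nodes[0], 20 + 30 + 40);
}
```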
@@ -62,7 +62,7 @@ fn test_gossipsub_forward() {
                         // Every node except the corner nodes are connected to 2 nodes.
                         if subscribed_count == (num_nodes * 2) - 2 {
                             node.swarm.publish(
-                                &vec![Topic::new(topic.into_string())],
+                                &[Topic::new(topic.into_string())],
                                 pubsub_message.clone(),
                             );
                         }
@@ -96,11 +96,10 @@ fn test_gossipsub_full_mesh_publish() {
     let mut received_count = 0;
     tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
         for node in nodes.iter_mut() {
-            loop {
-                match node.poll().unwrap() {
-                    Async::Ready(Some(Libp2pEvent::PubsubMessage {
-                        topics, message, ..
-                    })) => {
+            while let Async::Ready(Some(Libp2pEvent::PubsubMessage {
+                topics, message, ..
+            })) = node.poll().unwrap()
+            {
                 assert_eq!(topics.len(), 1);
                 // Assert topic is the published topic
                 assert_eq!(
@@ -114,29 +113,22 @@ fn test_gossipsub_full_mesh_publish() {
                     return Ok(Async::Ready(()));
                 }
             }
-                    _ => break,
-                }
-            }
         }
-        loop {
-            match publishing_node.poll().unwrap() {
-                Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => {
+        while let Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) =
+            publishing_node.poll().unwrap()
+        {
             // Received topics is one of subscribed eth2 topics
             assert!(topic.clone().into_string().starts_with("/eth2/"));
             // Publish on beacon block topic
             if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") {
                 subscribed_count += 1;
                 if subscribed_count == num_nodes - 1 {
-                    publishing_node.swarm.publish(
-                        &vec![Topic::new(topic.into_string())],
-                        pubsub_message.clone(),
-                    );
+                    publishing_node
+                        .swarm
+                        .publish(&[Topic::new(topic.into_string())], pubsub_message.clone());
                 }
             }
         }
-                _ => break,
-            }
-        }
         Ok(Async::NotReady)
     }))
 }
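A `loop { match ... _ => break }` that only handles one pattern is clippy's `while_let_loop` lint; the fix collapses it into `while let`. Minimal sketch:

```rust
fn main() {
    let mut events = vec!["a", "b", "c"];
    // Before:
    //     loop {
    //         match events.pop() {
    //             Some(event) => { /* handle event */ }
    //             _ => break,
    //         }
    //     }
    let mut handled = 0;
    while let Some(_event) = events.pop() {
        handled += 1;
    }
    assert_eq!(handled, 3);
}
```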
@@ -3,6 +3,7 @@ use eth2_libp2p::rpc::methods::*;
 use eth2_libp2p::rpc::*;
 use eth2_libp2p::{Libp2pEvent, RPCEvent};
 use slog::{warn, Level};
+use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
 use tokio::prelude::*;
@@ -106,20 +107,19 @@ fn test_status_rpc() {
     });

     // execute the futures and check the result
-    let test_result = Arc::new(Mutex::new(false));
+    let test_result = Arc::new(AtomicBool::new(false));
     let error_result = test_result.clone();
     let thread_result = test_result.clone();
     tokio::run(
         sender_future
             .select(receiver_future)
             .timeout(Duration::from_millis(1000))
-            .map_err(move |_| *error_result.lock().unwrap() = false)
+            .map_err(move |_| error_result.store(false, Relaxed))
             .map(move |result| {
-                *thread_result.lock().unwrap() = result.0;
-                ()
+                thread_result.store(result.0, Relaxed);
             }),
     );
-    assert!(*test_result.lock().unwrap());
+    assert!(test_result.load(Relaxed));
 }

 #[test]
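Replacing `Arc<Mutex<bool>>` with `Arc<AtomicBool>` matches clippy's `mutex_atomic` lint: a lone boolean flag does not need a lock. A sketch of the store/load pattern the tests now use:

```rust
use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
use std::sync::Arc;
use std::thread;

fn main() {
    // The atomic carries the same one-bit state as `Mutex<bool>`,
    // without any locking.
    let test_result = Arc::new(AtomicBool::new(false));
    let thread_result = test_result.clone();
    thread::spawn(move || thread_result.store(true, Relaxed))
        .join()
        .unwrap();
    assert!(test_result.load(Relaxed));
}
```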
@@ -236,20 +236,19 @@ fn test_blocks_by_range_chunked_rpc() {
     });

     // execute the futures and check the result
-    let test_result = Arc::new(Mutex::new(false));
+    let test_result = Arc::new(AtomicBool::new(false));
     let error_result = test_result.clone();
     let thread_result = test_result.clone();
     tokio::run(
         sender_future
             .select(receiver_future)
             .timeout(Duration::from_millis(1000))
-            .map_err(move |_| *error_result.lock().unwrap() = false)
+            .map_err(move |_| error_result.store(false, Relaxed))
             .map(move |result| {
-                *thread_result.lock().unwrap() = result.0;
-                ()
+                thread_result.store(result.0, Relaxed);
             }),
     );
-    assert!(*test_result.lock().unwrap());
+    assert!(test_result.load(Relaxed));
 }

 #[test]
@@ -359,20 +358,19 @@ fn test_blocks_by_range_single_empty_rpc() {
     });

     // execute the futures and check the result
-    let test_result = Arc::new(Mutex::new(false));
+    let test_result = Arc::new(AtomicBool::new(false));
     let error_result = test_result.clone();
     let thread_result = test_result.clone();
     tokio::run(
         sender_future
             .select(receiver_future)
             .timeout(Duration::from_millis(1000))
-            .map_err(move |_| *error_result.lock().unwrap() = false)
+            .map_err(move |_| error_result.store(false, Relaxed))
             .map(move |result| {
-                *thread_result.lock().unwrap() = result.0;
-                ()
+                thread_result.store(result.0, Relaxed);
             }),
     );
-    assert!(*test_result.lock().unwrap());
+    assert!(test_result.load(Relaxed));
 }

 #[test]
@@ -486,20 +484,19 @@ fn test_blocks_by_root_chunked_rpc() {
     });

     // execute the futures and check the result
-    let test_result = Arc::new(Mutex::new(false));
+    let test_result = Arc::new(AtomicBool::new(false));
     let error_result = test_result.clone();
     let thread_result = test_result.clone();
     tokio::run(
         sender_future
             .select(receiver_future)
             .timeout(Duration::from_millis(1000))
-            .map_err(move |_| *error_result.lock().unwrap() = false)
+            .map_err(move |_| error_result.store(false, Relaxed))
             .map(move |result| {
-                *thread_result.lock().unwrap() = result.0;
-                ()
+                thread_result.store(result.0, Relaxed);
            }),
     );
-    assert!(*test_result.lock().unwrap());
+    assert!(test_result.load(Relaxed));
 }

 #[test]
@@ -558,18 +555,17 @@ fn test_goodbye_rpc() {
     });

     // execute the futures and check the result
-    let test_result = Arc::new(Mutex::new(false));
+    let test_result = Arc::new(AtomicBool::new(false));
     let error_result = test_result.clone();
     let thread_result = test_result.clone();
     tokio::run(
         sender_future
             .select(receiver_future)
             .timeout(Duration::from_millis(1000))
-            .map_err(move |_| *error_result.lock().unwrap() = false)
+            .map_err(move |_| error_result.store(false, Relaxed))
             .map(move |result| {
-                *thread_result.lock().unwrap() = result.0;
-                ()
+                thread_result.store(result.0, Relaxed);
             }),
     );
-    assert!(*test_result.lock().unwrap());
+    assert!(test_result.load(Relaxed));
 }
@@ -19,7 +19,7 @@ pub fn genesis_deposits(
     let depth = spec.deposit_contract_tree_depth as usize;
     let mut tree = MerkleTree::create(&[], depth);
     for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() {
-        if let Err(_) = tree.push_leaf(*deposit_leaf, depth) {
+        if tree.push_leaf(*deposit_leaf, depth).is_err() {
             return Err(String::from("Failed to push leaf"));
         }

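`if let Err(_) = ...` binds nothing, so clippy's `redundant_pattern_matching` lint prefers `.is_err()`, as in the fix above. Sketch:

```rust
fn push_leaf(leaf: u8) -> Result<(), ()> {
    if leaf > 10 {
        Err(())
    } else {
        Ok(())
    }
}

fn main() {
    // `.is_err()` says the same thing as `if let Err(_) = ...`
    // without the throwaway pattern.
    if push_leaf(3).is_err() {
        panic!("Failed to push leaf");
    }
}
```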
@@ -59,7 +59,6 @@ fn basic() {
     spec.min_genesis_active_validator_count = 8;

     let deposits = (0..spec.min_genesis_active_validator_count + 2)
-        .into_iter()
         .map(|i| {
             deposit_contract.deposit_helper::<MinimalEthSpec>(
                 generate_deterministic_keypair(i as usize),
@@ -73,7 +72,7 @@ fn basic() {
         })
         .collect::<Vec<_>>();

-    let deposit_future = deposit_contract.deposit_multiple(deposits.clone());
+    let deposit_future = deposit_contract.deposit_multiple(deposits);

     let wait_future =
         service.wait_for_genesis_state::<MinimalEthSpec>(update_interval, spec.clone());
@@ -229,7 +229,7 @@ impl<T: BeaconChainTypes> MessageHandler<T> {
                     .on_block_gossip(peer_id.clone(), block);
                 // TODO: Apply more sophisticated validation and decoding logic
                 if should_forward_on {
-                    self.propagate_message(id, peer_id.clone());
+                    self.propagate_message(id, peer_id);
                 }
             }
             Err(e) => {
@@ -203,7 +203,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
             );

             self.network
-                .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
+                .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
         } else if remote.head_slot
             > self.chain.slot().unwrap_or_else(|_| Slot::from(0u64)) + FUTURE_SLOT_TOLERANCE
         {
@@ -219,7 +219,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
                 "reason" => "different system clocks or genesis time"
             );
             self.network
-                .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
+                .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
         } else if remote.finalized_epoch <= local.finalized_epoch
             && remote.finalized_root != Hash256::zero()
             && local.finalized_root != Hash256::zero()
@@ -239,7 +239,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
                 "reason" => "different finalized chain"
             );
             self.network
-                .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
+                .disconnect(peer_id, GoodbyeReason::IrrelevantNetwork);
         } else if remote.finalized_epoch < local.finalized_epoch {
             // The node has a lower finalized epoch, their chain is not useful to us. There are two
             // cases where a node can have a lower finalized epoch:
@@ -512,7 +512,7 @@ impl<T: BeaconChainTypes> MessageProcessor<T> {
                 // Inform the sync manager to find parents for this block
                 trace!(self.log, "Block with unknown parent received";
                     "peer_id" => format!("{:?}",peer_id));
-                self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block.clone())));
+                self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block)));
                 SHOULD_FORWARD_GOSSIP_BLOCK
             }
             BlockProcessingOutcome::FutureSlot {
@@ -263,7 +263,7 @@ fn network_service(
                     id,
                     source,
                     message,
-                    topics: _,
+                    ..
                 } => {
                     message_handler_send
                         .try_send(HandlerMessage::PubsubMessage(id, source, message))
@@ -66,7 +66,7 @@ impl SyncNetworkContext {
             "count" => request.count,
             "peer" => format!("{:?}", peer_id)
         );
-        self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRange(request))
+        self.send_rpc_request(peer_id, RPCRequest::BlocksByRange(request))
     }

     pub fn blocks_by_root_request(
@@ -81,7 +81,7 @@ impl SyncNetworkContext {
             "count" => request.block_roots.len(),
             "peer" => format!("{:?}", peer_id)
         );
-        self.send_rpc_request(peer_id.clone(), RPCRequest::BlocksByRoot(request))
+        self.send_rpc_request(peer_id, RPCRequest::BlocksByRoot(request))
     }

     pub fn downvote_peer(&mut self, peer_id: PeerId) {
@@ -91,7 +91,7 @@ impl SyncNetworkContext {
             "peer" => format!("{:?}", peer_id)
         );
         // TODO: Implement reputation
-        self.disconnect(peer_id.clone(), GoodbyeReason::Fault);
+        self.disconnect(peer_id, GoodbyeReason::Fault);
     }

     fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
@@ -62,16 +62,16 @@ impl<T: EthSpec> PendingBatches<T> {
         let peer_request = batch.current_peer.clone();
         self.peer_requests
             .entry(peer_request)
-            .or_insert_with(|| HashSet::new())
+            .or_insert_with(HashSet::new)
             .insert(request_id);
         self.batches.insert(request_id, batch)
     }

-    pub fn remove(&mut self, request_id: &RequestId) -> Option<Batch<T>> {
-        if let Some(batch) = self.batches.remove(request_id) {
+    pub fn remove(&mut self, request_id: RequestId) -> Option<Batch<T>> {
+        if let Some(batch) = self.batches.remove(&request_id) {
             if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone())
             {
-                entry.get_mut().remove(request_id);
+                entry.get_mut().remove(&request_id);

                 if entry.get().is_empty() {
                     entry.remove();
@@ -85,8 +85,8 @@ impl<T: EthSpec> PendingBatches<T> {

     /// Adds a block to the batches if the request id exists. Returns None if there is no batch
     /// matching the request id.
-    pub fn add_block(&mut self, request_id: &RequestId, block: BeaconBlock<T>) -> Option<()> {
-        let batch = self.batches.get_mut(request_id)?;
+    pub fn add_block(&mut self, request_id: RequestId, block: BeaconBlock<T>) -> Option<()> {
+        let batch = self.batches.get_mut(&request_id)?;
         batch.downloaded_blocks.push(block);
         Some(())
     }
@@ -101,7 +101,7 @@ impl<T: EthSpec> PendingBatches<T> {
     pub fn remove_batch_by_peer(&mut self, peer_id: &PeerId) -> Option<Batch<T>> {
         let request_ids = self.peer_requests.get(peer_id)?;

-        let request_id = request_ids.iter().next()?.clone();
-        self.remove(&request_id)
+        let request_id = *request_ids.iter().next()?;
+        self.remove(request_id)
     }
 }
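Changing `request_id: &RequestId` to `request_id: RequestId` matches clippy's `trivially_copy_pass_by_ref` lint, assuming `RequestId` is a small `Copy` type: passing it by reference costs an indirection for no benefit. A sketch with a hypothetical alias:

```rust
// `RequestId` here is a hypothetical stand-in for a small Copy type.
type RequestId = usize;

// Taking `request_id` by value; callers no longer write `&request_id`.
fn remove(ids: &mut Vec<RequestId>, request_id: RequestId) -> Option<RequestId> {
    let position = ids.iter().position(|id| *id == request_id)?;
    Some(ids.remove(position))
}

fn main() {
    let mut ids = vec![1, 2, 3];
    assert_eq!(remove(&mut ids, 2), Some(2));
    assert_eq!(ids, vec![1, 3]);
}
```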
@ -144,11 +144,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
) -> Option<ProcessingResult> {
|
) -> Option<ProcessingResult> {
|
||||||
if let Some(block) = beacon_block {
|
if let Some(block) = beacon_block {
|
||||||
// This is not a stream termination, simply add the block to the request
|
// This is not a stream termination, simply add the block to the request
|
||||||
self.pending_batches.add_block(&request_id, block.clone())?;
|
self.pending_batches.add_block(request_id, block.clone())?;
|
||||||
return Some(ProcessingResult::KeepChain);
|
Some(ProcessingResult::KeepChain)
|
||||||
} else {
|
} else {
|
||||||
// A stream termination has been sent. This batch has ended. Process a completed batch.
|
// A stream termination has been sent. This batch has ended. Process a completed batch.
|
||||||
let batch = self.pending_batches.remove(&request_id)?;
|
let batch = self.pending_batches.remove(request_id)?;
|
||||||
Some(self.process_completed_batch(chain.clone(), network, batch, log))
|
Some(self.process_completed_batch(chain.clone(), network, batch, log))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -433,7 +433,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false;
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a peer if there exists a peer which does not currently have a pending request.
|
/// Returns a peer if there exists a peer which does not currently have a pending request.
|
||||||
@ -500,10 +500,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
network: &mut SyncNetworkContext,
|
network: &mut SyncNetworkContext,
|
||||||
peer_id: &PeerId,
|
peer_id: &PeerId,
|
||||||
request_id: &RequestId,
|
request_id: RequestId,
|
||||||
log: &slog::Logger,
|
log: &slog::Logger,
|
||||||
) -> Option<ProcessingResult> {
|
) -> Option<ProcessingResult> {
|
||||||
if let Some(batch) = self.pending_batches.remove(&request_id) {
|
if let Some(batch) = self.pending_batches.remove(request_id) {
|
||||||
warn!(log, "Batch failed. RPC Error"; "id" => batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id));
|
warn!(log, "Batch failed. RPC Error"; "id" => batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id));
|
||||||
|
|
||||||
Some(self.failed_batch(network, batch, log))
|
Some(self.failed_batch(network, batch, log))
|
||||||
|
@@ -188,7 +188,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
             debug!(self.log, "Adding peer to the existing head chain peer pool"; "head_root" => format!("{}",remote.head_root), "head_slot" => remote.head_slot, "peer_id" => format!("{:?}", peer_id));

             // add the peer to the head's pool
-            chain.add_peer(network, peer_id.clone(), &self.log);
+            chain.add_peer(network, peer_id, &self.log);
         } else {
             // There are no other head chains that match this peer's status, create a new one, and
             let start_slot = std::cmp::min(local_info.head_slot, remote_finalized_slot);
@@ -305,7 +305,8 @@ impl<T: BeaconChainTypes> RangeSync<T> {
     /// retries. In this case, we need to remove the chain and re-status all the peers.
     fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) {
         let log_ref = &self.log;
-        match self.chains.head_finalized_request(|chain| {
+        if let Some((index, ProcessingResult::RemoveChain)) =
+            self.chains.head_finalized_request(|chain| {
                 if chain.peer_pool.remove(peer_id) {
                     // this chain contained the peer
                     while let Some(batch) = chain.pending_batches.remove_batch_by_peer(peer_id) {
@@ -321,14 +322,12 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                 } else {
                     None
                 }
-        }) {
-            Some((index, ProcessingResult::RemoveChain)) => {
+            })
+        {
             // the chain needed to be removed
             debug!(self.log, "Chain being removed due to failed batch");
             self.chains.remove_chain(network, index, &self.log);
         }
-            _ => {} // chain didn't need to be removed, ignore
-        }
     }

     /// An RPC error has occurred.
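Replacing a two-arm `match` whose second arm is `_ => {}` with an `if let` is clippy's `single_match` suggestion; the rewrite above also threads the tuple pattern `(index, ProcessingResult::RemoveChain)` straight into the binding. A hedged sketch of the same shape, with the enum and lookup function as stand-ins:

    #[allow(dead_code)]
    #[derive(Debug)]
    enum ProcessingResult {
        KeepChain,
        RemoveChain,
    }

    // Stand-in for `head_finalized_request`: maybe returns a chain index
    // and the action to take on it.
    fn find_chain(flag: bool) -> Option<(usize, ProcessingResult)> {
        if flag {
            Some((3, ProcessingResult::RemoveChain))
        } else {
            None
        }
    }

    fn main() {
        // Before (clippy::single_match):
        // match find_chain(true) {
        //     Some((index, ProcessingResult::RemoveChain)) => println!("removing chain {}", index),
        //     _ => {} // chain didn't need to be removed, ignore
        // }

        // After: one pattern, no dead arm.
        if let Some((index, ProcessingResult::RemoveChain)) = find_chain(true) {
            println!("removing chain {}", index);
        }
    }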
@@ -344,7 +343,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
         // check that this request is pending
         let log_ref = &self.log;
         match self.chains.head_finalized_request(|chain| {
-            chain.inject_error(network, &peer_id, &request_id, log_ref)
+            chain.inject_error(network, &peer_id, request_id, log_ref)
         }) {
             Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists
             Some((index, ProcessingResult::RemoveChain)) => {
@@ -145,7 +145,7 @@ pub fn state_at_slot<T: BeaconChainTypes>(
         // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary.
         Ok((
             beacon_chain.head()?.beacon_state_root,
-            beacon_chain.head()?.beacon_state.clone(),
+            beacon_chain.head()?.beacon_state,
         ))
     } else {
         let root = state_root_at_slot(beacon_chain, slot)?;
@@ -209,7 +209,7 @@ pub fn state_root_at_slot<T: BeaconChainTypes>(
     //
     // Use `per_slot_processing` to advance the head state to the present slot,
     // assuming that all slots do not contain a block (i.e., they are skipped slots).
-    let mut state = beacon_chain.head()?.beacon_state.clone();
+    let mut state = beacon_chain.head()?.beacon_state;
     let spec = &T::EthSpec::default_spec();

     for _ in state.slot.as_u64()..slot.as_u64() {
@@ -54,6 +54,8 @@ pub struct NetworkInfo<T: BeaconChainTypes> {
     pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
 }

+// Allowing more than 7 arguments.
+#[allow(clippy::too_many_arguments)]
 pub fn start_server<T: BeaconChainTypes>(
     config: &Config,
     executor: &TaskExecutor,
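Both REST handlers in this area keep their long signatures and opt out with `#[allow(clippy::too_many_arguments)]`, which clippy raises once a function takes more than seven parameters. A sketch of the two options; the function and parameter names here are invented:

    // Option 1: acknowledge the lint where a long signature is wired up in
    // exactly one place, as the diff does.
    #[allow(clippy::too_many_arguments)]
    fn start_server(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u32 {
        u32::from(a) + u32::from(b) + u32::from(c) + u32::from(d)
            + u32::from(e) + u32::from(f) + u32::from(g) + u32::from(h)
    }

    // Option 2: the longer-term fix clippy is nudging toward: group related
    // parameters into a struct.
    struct ServerConfig {
        a: u8,
        b: u8,
    }

    fn start_server_with_config(cfg: ServerConfig) -> u32 {
        u32::from(cfg.a) + u32::from(cfg.b)
    }

    fn main() {
        assert_eq!(start_server(1, 1, 1, 1, 1, 1, 1, 1), 8);
        assert_eq!(start_server_with_config(ServerConfig { a: 1, b: 2 }), 3);
    }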
@@ -19,6 +19,8 @@ where
     Box::new(item.into_future())
 }

+// Allowing more than 7 arguments.
+#[allow(clippy::too_many_arguments)]
 pub fn route<T: BeaconChainTypes>(
     req: Request<Body>,
     beacon_chain: Arc<BeaconChain<T>>,
@@ -47,8 +47,7 @@ fn get_randao_reveal<T: BeaconChainTypes>(
         .head()
         .expect("should get head")
         .beacon_state
-        .fork
-        .clone();
+        .fork;
     let proposer_index = beacon_chain
         .block_proposer(slot)
         .expect("should get proposer index");
@@ -69,8 +68,7 @@ fn sign_block<T: BeaconChainTypes>(
         .head()
         .expect("should get head")
         .beacon_state
-        .fork
-        .clone();
+        .fork;
     let proposer_index = beacon_chain
         .block_proposer(block.slot)
         .expect("should get proposer index");
@@ -91,11 +89,7 @@ fn validator_produce_attestation() {
         .client
         .beacon_chain()
         .expect("client should have beacon chain");
-    let state = beacon_chain
-        .head()
-        .expect("should get head")
-        .beacon_state
-        .clone();
+    let state = beacon_chain.head().expect("should get head").beacon_state;

     let validator_index = 0;
     let duties = state
@@ -169,7 +163,7 @@ fn validator_produce_attestation() {
             remote_node
                 .http
                 .validator()
-                .publish_attestation(attestation.clone()),
+                .publish_attestation(attestation),
         )
         .expect("should publish attestation");
     assert!(
@@ -344,7 +338,7 @@ fn validator_block_post() {
             remote_node
                 .http
                 .validator()
-                .produce_block(slot, randao_reveal.clone()),
+                .produce_block(slot, randao_reveal),
         )
         .expect("should fetch block from http api");

@@ -360,12 +354,12 @@ fn validator_block_post() {
         );
     }

-    sign_block(beacon_chain.clone(), &mut block, spec);
+    sign_block(beacon_chain, &mut block, spec);
     let block_root = block.canonical_root();

     let publish_status = env
         .runtime()
-        .block_on(remote_node.http.validator().publish_block(block.clone()))
+        .block_on(remote_node.http.validator().publish_block(block))
         .expect("should publish block");

     if cfg!(not(feature = "fake_crypto")) {
@@ -419,7 +413,7 @@ fn validator_block_get() {
         .expect("client should have beacon chain");

     let slot = Slot::new(1);
-    let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec);
+    let randao_reveal = get_randao_reveal(beacon_chain, slot, spec);

     let block = env
         .runtime()
@@ -268,7 +268,7 @@ pub fn get_configs<E: EthSpec>(
     if eth2_config.spec_constants != client_config.spec_constants {
         crit!(log, "Specification constants do not match.";
             "client_config" => client_config.spec_constants.to_string(),
-            "eth2_config" => eth2_config.spec_constants.to_string()
+            "eth2_config" => eth2_config.spec_constants
         );
         return Err("Specification constant mismatch".into());
     }
@@ -24,7 +24,6 @@ fn get_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
     }

     state.validators = (0..validator_count)
-        .into_iter()
         .collect::<Vec<_>>()
         .par_iter()
         .map(|&i| Validator {
@@ -77,7 +76,7 @@ fn all_benches(c: &mut Criterion) {
             .sample_size(10),
     );

-    let inner_state = state.clone();
+    let inner_state = state;
     c.bench(
         &format!("{}_validators", validator_count),
         Benchmark::new("encode/beacon_state/committee_cache[0]", move |b| {
@@ -27,7 +27,6 @@ fn get_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
     }

     state.validators = (0..validator_count)
-        .into_iter()
         .collect::<Vec<_>>()
         .par_iter()
         .map(|&i| Validator {
@@ -1,5 +1,6 @@
 use super::*;
 use ssz::{Decode, DecodeError};
+use std::cmp::Ordering;

 fn get_block_bytes<T: Store<E>, E: EthSpec>(
     store: &T,
@@ -45,12 +46,10 @@ fn get_at_preceeding_slot<T: Store<E>, E: EthSpec>(
         if let Some(bytes) = get_block_bytes::<_, E>(store, root)? {
             let this_slot = read_slot_from_block_bytes(&bytes)?;

-            if this_slot == slot {
-                break Ok(Some((root, bytes)));
-            } else if this_slot < slot {
-                break Ok(None);
-            } else {
-                root = read_parent_root_from_block_bytes(&bytes)?;
+            match this_slot.cmp(&slot) {
+                Ordering::Equal => break Ok(Some((root, bytes))),
+                Ordering::Less => break Ok(None),
+                Ordering::Greater => root = read_parent_root_from_block_bytes(&bytes)?,
             }
         } else {
             break Ok(None);
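The `if`/`else if`/`else` ladder over `this_slot` and `slot` becomes a single `match` on `Ordering`, which is clippy's `comparison_chain` suggestion: one comparison instead of up to two, with the compiler guaranteeing all three cases are handled. The shape in isolation:

    use std::cmp::Ordering;

    // Before (clippy::comparison_chain): up to two comparisons.
    fn classify_chained(this_slot: u64, slot: u64) -> &'static str {
        if this_slot == slot {
            "found"
        } else if this_slot < slot {
            "overshot"
        } else {
            "keep walking parents"
        }
    }

    // After: one `cmp`, exhaustively matched.
    fn classify(this_slot: u64, slot: u64) -> &'static str {
        match this_slot.cmp(&slot) {
            Ordering::Equal => "found",
            Ordering::Less => "overshot",
            Ordering::Greater => "keep walking parents",
        }
    }

    fn main() {
        for (a, b) in [(5, 5), (3, 5), (7, 5)].iter() {
            assert_eq!(classify_chained(*a, *b), classify(*a, *b));
        }
    }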
@@ -237,7 +237,7 @@ mod test {
                 .add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i))
                 .expect("add_block_root ok");

-            let expected = (1..i + 1)
+            let expected = (1..=i)
                 .rev()
                 .map(|j| (int_hash(j), Slot::new(j)))
                 .collect::<Vec<_>>();
@@ -262,12 +262,12 @@ mod test {
                 .add_block_root(int_hash(i), int_hash(i - step_length), Slot::new(i))
                 .expect("add_block_root ok");

-            let sparse_expected = (1..i + 1)
+            let sparse_expected = (1..=i)
                 .rev()
                 .step_by(step_length as usize)
                 .map(|j| (int_hash(j), Slot::new(j)))
                 .collect_vec();
-            let every_slot_expected = (1..i + 1)
+            let every_slot_expected = (1..=i)
                 .rev()
                 .map(|j| {
                     let nearest = 1 + (j - 1) / step_length * step_length;
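`(1..i + 1)` and `(1..=i)` iterate the same values; clippy's `range_plus_one` prefers the inclusive form because it states the upper bound directly. For example:

    fn main() {
        let i: u64 = 5;

        // Before (clippy::range_plus_one): the inclusive bound hides in `+ 1`.
        let expected: Vec<u64> = (1..i + 1).rev().collect();

        // After: `..=` makes "up to and including i" explicit.
        let also_expected: Vec<u64> = (1..=i).rev().collect();

        assert_eq!(expected, also_expected);
        assert_eq!(also_expected, vec![5, 4, 3, 2, 1]);
    }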
@@ -343,10 +343,9 @@ mod test {

         // Check that advancing the finalized root onto one side completely removes the other
         // side.
-        let fin_tree = tree.clone();
+        let fin_tree = tree;
         let prune_point = num_blocks / 2;
         let remaining_fork1_blocks = all_fork1_blocks
-            .clone()
             .into_iter()
             .take_while(|(_, slot)| *slot >= prune_point)
             .collect_vec();
@@ -185,7 +185,7 @@ pub trait Field<E: EthSpec>: Copy {
             .values
             .first()
             .cloned()
-            .ok_or(ChunkError::MissingGenesisValue.into())
+            .ok_or_else(|| ChunkError::MissingGenesisValue.into())
     }

     /// Store the given `value` as the genesis value for this field, unless stored already.
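`ok_or` evaluates its argument eagerly, so the `.into()` conversion above ran even on the success path; clippy's `or_fun_call` points at exactly this, and `ok_or_else` defers the work to the `None` case. A self-contained sketch with an intentionally allocating conversion; the error types are stand-ins:

    #[derive(Debug)]
    struct ChunkError;

    #[derive(Debug)]
    struct StoreError(String);

    impl From<ChunkError> for StoreError {
        fn from(_: ChunkError) -> Self {
            // This conversion allocates, so it should only run when needed.
            StoreError("missing genesis value".to_string())
        }
    }

    fn genesis_value(values: &[u64]) -> Result<u64, StoreError> {
        // Before (clippy::or_fun_call): `.ok_or(ChunkError.into())` builds the
        // error string even when a value is present.
        // After: the closure runs only on the `None` path.
        values.first().cloned().ok_or_else(|| ChunkError.into())
    }

    fn main() {
        assert_eq!(genesis_value(&[7, 8]).unwrap(), 7);
        assert!(genesis_value(&[]).is_err());
    }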
@@ -685,7 +685,7 @@ mod test {
         ];

         assert_eq!(
-            stitch(chunks.clone(), 2, 6, chunk_size, 12, 99).unwrap(),
+            stitch(chunks, 2, 6, chunk_size, 12, 99).unwrap(),
             vec![99, 99, 2, 3, 4, 5, 99, 99, 99, 99, 99, 99]
         );
     }
@@ -707,7 +707,7 @@ mod test {
         );

         assert_eq!(
-            stitch(chunks.clone(), 2, 10, chunk_size, 8, default).unwrap(),
+            stitch(chunks, 2, 10, chunk_size, 8, default).unwrap(),
             vec![v(8), v(9), v(2), v(3), v(4), v(5), v(6), v(7)]
         );
     }
@@ -20,9 +20,9 @@ pub struct SimpleForwardsBlockRootsIterator {
 /// Fusion of the above two approaches to forwards iteration. Fast and efficient.
 pub enum HybridForwardsBlockRootsIterator<E: EthSpec> {
     PreFinalization {
-        iter: FrozenForwardsBlockRootsIterator<E>,
+        iter: Box<FrozenForwardsBlockRootsIterator<E>>,
         /// Data required by the `PostFinalization` iterator when we get to it.
-        continuation_data: Option<(BeaconState<E>, Hash256)>,
+        continuation_data: Box<Option<(BeaconState<E>, Hash256)>>,
     },
     PostFinalization {
         iter: SimpleForwardsBlockRootsIterator,
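Boxing `FrozenForwardsBlockRootsIterator` and the continuation data addresses clippy's `large_enum_variant`: an enum is as big as its largest variant, so a huge `PreFinalization` payload would inflate every `PostFinalization` value too. Illustrative sizes, not the real types:

    #[allow(dead_code)]
    enum Unboxed {
        Big([u8; 1024]),
        Small(u8),
    }

    #[allow(dead_code)]
    enum Boxed {
        Big(Box<[u8; 1024]>),
        Small(u8),
    }

    fn main() {
        // Every `Unboxed` value reserves space for the 1 KiB variant.
        assert!(std::mem::size_of::<Unboxed>() >= 1024);
        // The boxed variant costs one pointer plus the discriminant.
        assert!(std::mem::size_of::<Boxed>() <= 16);
    }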
@@ -99,13 +99,13 @@ impl<E: EthSpec> HybridForwardsBlockRootsIterator<E> {

         if start_slot < latest_restore_point_slot {
             PreFinalization {
-                iter: FrozenForwardsBlockRootsIterator::new(
+                iter: Box::new(FrozenForwardsBlockRootsIterator::new(
                     store,
                     start_slot,
                     latest_restore_point_slot,
                     spec,
-                ),
-                continuation_data: Some((end_state, end_block_root)),
+                )),
+                continuation_data: Box::new(Some((end_state, end_block_root))),
             }
         } else {
             PostFinalization {
@@ -145,14 +145,15 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
         let current_split_slot = store.get_split_slot();

         if frozen_head.slot < current_split_slot {
-            Err(HotColdDBError::FreezeSlotError {
+            return Err(HotColdDBError::FreezeSlotError {
                 current_split_slot,
                 proposed_split_slot: frozen_head.slot,
-            })?;
+            }
+            .into());
         }

         if frozen_head.slot % E::slots_per_epoch() != 0 {
-            Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot))?;
+            return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into());
         }

         // 1. Copy all of the states between the head and the split slot, from the hot DB
|
|||||||
let key = Self::restore_point_key(restore_point_index);
|
let key = Self::restore_point_key(restore_point_index);
|
||||||
RestorePointHash::db_get(&self.cold_db, &key)?
|
RestorePointHash::db_get(&self.cold_db, &key)?
|
||||||
.map(|r| r.state_root)
|
.map(|r| r.state_root)
|
||||||
.ok_or(HotColdDBError::MissingRestorePointHash(restore_point_index).into())
|
.ok_or_else(|| HotColdDBError::MissingRestorePointHash(restore_point_index).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store the state root of a restore point.
|
/// Store the state root of a restore point.
|
||||||
|
@ -345,7 +345,7 @@ mod test {
|
|||||||
state_b.state_roots[0] = state_a_root;
|
state_b.state_roots[0] = state_a_root;
|
||||||
store.put_state(&state_a_root, &state_a).unwrap();
|
store.put_state(&state_a_root, &state_a).unwrap();
|
||||||
|
|
||||||
let iter = BlockRootsIterator::new(store.clone(), &state_b);
|
let iter = BlockRootsIterator::new(store, &state_b);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
iter.clone().any(|(_root, slot)| slot == 0),
|
iter.clone().any(|(_root, slot)| slot == 0),
|
||||||
@ -394,7 +394,7 @@ mod test {
|
|||||||
store.put_state(&state_a_root, &state_a).unwrap();
|
store.put_state(&state_a_root, &state_a).unwrap();
|
||||||
store.put_state(&state_b_root, &state_b).unwrap();
|
store.put_state(&state_b_root, &state_b).unwrap();
|
||||||
|
|
||||||
let iter = StateRootsIterator::new(store.clone(), &state_b);
|
let iter = StateRootsIterator::new(store, &state_b);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
iter.clone().any(|(_root, slot)| slot == 0),
|
iter.clone().any(|(_root, slot)| slot == 0),
|
||||||
|
@ -47,11 +47,7 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
|||||||
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = Self::get_key_for_col(col, key);
|
||||||
|
|
||||||
Ok(self
|
Ok(self.db.read().get(&column_key).cloned())
|
||||||
.db
|
|
||||||
.read()
|
|
||||||
.get(&column_key)
|
|
||||||
.and_then(|val| Some(val.clone())))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Puts a key in the database.
|
/// Puts a key in the database.
|
||||||
|
@ -60,13 +60,12 @@ impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type MpscSender<E> = mpsc::Sender<(Hash256, BeaconState<E>)>;
|
||||||
|
|
||||||
/// Migrator that runs a background thread to migrate state from the hot to the cold database.
|
/// Migrator that runs a background thread to migrate state from the hot to the cold database.
|
||||||
pub struct BackgroundMigrator<E: EthSpec> {
|
pub struct BackgroundMigrator<E: EthSpec> {
|
||||||
db: Arc<DiskStore<E>>,
|
db: Arc<DiskStore<E>>,
|
||||||
tx_thread: Mutex<(
|
tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
|
||||||
mpsc::Sender<(Hash256, BeaconState<E>)>,
|
|
||||||
thread::JoinHandle<()>,
|
|
||||||
)>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
|
impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
|
||||||
|
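Factoring `mpsc::Sender<(Hash256, BeaconState<E>)>` into the `MpscSender<E>` alias answers clippy's `type_complexity` lint on the `tx_thread` field. The same move with a concrete message type; the worker below is a simplified stand-in for the background migrator, not its real API:

    use std::sync::{mpsc, Mutex};
    use std::thread;

    // The alias collapses the nested generic type that clippy would flag.
    type WorkSender = mpsc::Sender<(u64, Vec<u8>)>;

    struct BackgroundWorker {
        // Without the alias this field would read
        // `Mutex<(mpsc::Sender<(u64, Vec<u8>)>, thread::JoinHandle<()>)>`.
        tx_thread: Mutex<(WorkSender, thread::JoinHandle<()>)>,
    }

    fn main() {
        let (tx, rx) = mpsc::channel();
        let handle = thread::spawn(move || {
            // Drain jobs until every sender is dropped.
            for (id, bytes) in rx {
                println!("job {}: {} bytes", id, bytes.len());
            }
        });

        let worker = BackgroundWorker {
            tx_thread: Mutex::new((tx, handle)),
        };
        worker.tx_thread.lock().unwrap().0.send((1, vec![0u8; 4])).unwrap();

        let (tx, handle) = worker.tx_thread.into_inner().unwrap();
        drop(tx); // close the channel so the worker thread exits
        handle.join().unwrap();
    }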
@@ -8,6 +8,7 @@ use itertools::Itertools;
 use parking_lot::RwLock;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
+use std::cmp::Ordering;
 use std::collections::HashMap;
 use std::fmt;
 use std::marker::PhantomData;
@@ -182,13 +183,15 @@ impl ReducedTreeSsz {
         }
     }

-    pub fn to_reduced_tree<T, E>(
+    pub fn into_reduced_tree<T, E>(
         self,
         store: Arc<T>,
         block_root_tree: Arc<BlockRootTree>,
     ) -> Result<ReducedTree<T, E>> {
         if self.node_hashes.len() != self.nodes.len() {
-            Error::InvalidReducedTreeSsz("node_hashes and nodes should have equal length".into());
+            return Err(Error::InvalidReducedTreeSsz(
+                "node_hashes and nodes should have equal length".to_string(),
+            ));
         }
         let nodes: HashMap<_, _> = self
             .node_hashes
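Two things happen in this hunk: the added `return Err(...)` is a genuine bug fix (the old code built the error value and silently dropped it), and the rename follows clippy's `wrong_self_convention`: a method that consumes `self` should be named `into_*`, with `to_*` reserved for borrowing conversions. The naming rule in miniature, with toy types:

    struct ReducedTreeBytes(Vec<u8>);
    struct Tree(Vec<u8>);

    impl ReducedTreeBytes {
        // Before (clippy::wrong_self_convention): `to_tree(self)`. A `to_*`
        // name promises a borrow, but this consumes the receiver.
        // After: `into_*` advertises the move.
        fn into_tree(self) -> Tree {
            Tree(self.0)
        }
    }

    fn main() {
        let tree = ReducedTreeBytes(vec![1, 2, 3]).into_tree();
        assert_eq!(tree.0, vec![1, 2, 3]);
    }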
@@ -740,16 +743,19 @@ where
         if a_slot < self.root.1 || b_slot < self.root.1 {
             None
         } else {
-            if a_slot < b_slot {
+            match a_slot.cmp(&b_slot) {
+                Ordering::Less => {
                     for _ in a_slot.as_u64()..b_slot.as_u64() {
                         b_root = b_iter.next()?.0;
                     }
-            } else if a_slot > b_slot {
+                }
+                Ordering::Greater => {
                     for _ in b_slot.as_u64()..a_slot.as_u64() {
                         a_root = a_iter.next()?.0;
                     }
                 }
+                Ordering::Equal => (),
+            }
             Some((a_root, b_root))
         }
     }
@@ -876,7 +882,7 @@ where
         block_root_tree: Arc<BlockRootTree>,
     ) -> Result<Self> {
         let reduced_tree_ssz = ReducedTreeSsz::from_ssz_bytes(bytes)?;
-        Ok(reduced_tree_ssz.to_reduced_tree(store, block_root_tree)?)
+        Ok(reduced_tree_ssz.into_reduced_tree(store, block_root_tree)?)
     }
 }

@@ -1013,8 +1019,7 @@ mod tests {
         );
         let ssz_tree = ReducedTreeSsz::from_reduced_tree(&tree);
         let bytes = tree.as_bytes();
-        let recovered_tree =
-            ReducedTree::from_bytes(&bytes, store.clone(), block_root_tree).unwrap();
+        let recovered_tree = ReducedTree::from_bytes(&bytes, store, block_root_tree).unwrap();

         let recovered_ssz = ReducedTreeSsz::from_reduced_tree(&recovered_tree);
         assert_eq!(ssz_tree, recovered_ssz);
@@ -167,7 +167,7 @@ mod test {
             HashSet::from_iter(vec![5, 6, 7, 8]), // 4, 4*
             HashSet::from_iter(vec![0, 1, 2, 3, 4]), // 5*
         ];
-        let cover = maximum_cover(sets.clone(), 3);
+        let cover = maximum_cover(sets, 3);
         assert_eq!(quality(&cover), 11);
     }

@@ -182,7 +182,7 @@ mod test {
             HashSet::from_iter(vec![1, 5, 6, 8]),
             HashSet::from_iter(vec![1, 7, 11, 19]),
         ];
-        let cover = maximum_cover(sets.clone(), 5);
+        let cover = maximum_cover(sets, 5);
         assert_eq!(quality(&cover), 19);
         assert_eq!(cover.len(), 5);
     }
@@ -311,11 +311,7 @@ fn bench_block<T: EthSpec>(
                 )
                 .expect("should get indexed attestation");

-                (
-                    local_spec.clone(),
-                    local_state.clone(),
-                    indexed_attestation.clone(),
-                )
+                (local_spec.clone(), local_state.clone(), indexed_attestation)
             },
             |(spec, ref mut state, indexed_attestation)| {
                 black_box(
@@ -349,11 +345,7 @@ fn bench_block<T: EthSpec>(
                 )
                 .expect("should get indexed attestation");

-                (
-                    local_spec.clone(),
-                    local_state.clone(),
-                    indexed_attestation.clone(),
-                )
+                (local_spec.clone(), local_state.clone(), indexed_attestation)
             },
             |(spec, ref mut state, indexed_attestation)| {
                 black_box(
@@ -373,7 +365,7 @@ fn bench_block<T: EthSpec>(
     );

     let local_block = block.clone();
-    let local_state = state.clone();
+    let local_state = state;
     c.bench(
         &title,
         Benchmark::new("get_attesting_indices", move |b| {
@@ -409,7 +401,7 @@ fn bench_block<T: EthSpec>(
             .sample_size(10),
     );

-    let local_block = block.clone();
+    let local_block = block;
     c.bench(
         &title,
         Benchmark::new("ssz_block_len", move |b| {
@@ -29,7 +29,7 @@ where
     F: FnMut(&mut BlockBuilder<T>),
     G: FnMut(&mut BeaconBlock<T>),
 {
-    let (mut block, state) = get_block::<T, _>(mutate_builder);
+    let (mut block, mut state) = get_block::<T, _>(mutate_builder);

     /*
      * Control check to ensure the valid block should pass verification.
@@ -79,7 +79,7 @@ where

     assert_eq!(
         per_block_processing(
-            &mut state.clone(),
+            &mut state,
             &block,
             None,
             BlockSignatureStrategy::VerifyBulk,
@@ -22,7 +22,6 @@ fn get_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
     }

     state.validators = (0..validator_count)
-        .into_iter()
         .collect::<Vec<_>>()
         .par_iter()
         .map(|&i| Validator {
@@ -91,7 +90,7 @@ fn all_benches(c: &mut Criterion) {
             .sample_size(10),
     );

-    let inner_state = state.clone();
+    let inner_state = state;
     c.bench(
         &format!("{}_validators", validator_count),
         Benchmark::new("clone_without_caches/beacon_state", move |b| {
@@ -71,13 +71,13 @@ impl<T: EthSpec> Attestation<T> {
         if self
             .aggregation_bits
             .get(committee_position)
-            .map_err(|e| Error::SszTypesError(e))?
+            .map_err(Error::SszTypesError)?
         {
             Err(Error::AlreadySigned(committee_position))
         } else {
             self.aggregation_bits
                 .set(committee_position, true)
-                .map_err(|e| Error::SszTypesError(e))?;
+                .map_err(Error::SszTypesError)?;

             let message = self.data.tree_hash_root();
             let domain = spec.get_domain(self.data.target.epoch, Domain::BeaconAttester, fork);
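`|e| Error::SszTypesError(e)` only forwards its argument; clippy's `redundant_closure` notes that a tuple-variant constructor is already a function, so it can be handed to `map_err` directly. For instance, with error types invented for the sketch:

    #[derive(Debug)]
    enum Error {
        SszTypes(String),
    }

    fn get_bit(bits: &[bool], i: usize) -> Result<bool, Error> {
        bits.get(i)
            .copied()
            .ok_or_else(|| format!("index {} out of bounds", i))
            // Before (clippy::redundant_closure): .map_err(|e| Error::SszTypes(e))
            // After: the constructor itself is the function.
            .map_err(Error::SszTypes)
    }

    fn main() {
        assert!(get_bit(&[true, false], 0).unwrap());
        assert!(get_bit(&[true, false], 9).is_err());
    }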
@@ -897,7 +897,7 @@ impl<T: EthSpec> BeaconState<T> {
             .enumerate()
             .skip(self.pubkey_cache.len())
         {
-            let success = self.pubkey_cache.insert(validator.pubkey.clone().into(), i);
+            let success = self.pubkey_cache.insert(validator.pubkey.clone(), i);
             if !success {
                 return Err(Error::PubkeyCacheInconsistent);
             }
@@ -957,7 +957,7 @@ impl<T: EthSpec> BeaconState<T> {
                 validator
                     .pubkey
                     .decompress()
-                    .map_err(|e| Error::InvalidValidatorPubkey(e))
+                    .map_err(Error::InvalidValidatorPubkey)
             } else {
                 Ok(())
             }
@@ -24,9 +24,8 @@ impl<T: EthSpec> TestingAttestationBuilder<T> {

         let mut aggregation_bits_len = committee.len();

-        match test_task {
-            AttestationTestTask::BadAggregationBitfieldLen => aggregation_bits_len += 1,
-            _ => (),
+        if test_task == AttestationTestTask::BadAggregationBitfieldLen {
+            aggregation_bits_len += 1
         }

         let mut aggregation_bits = BitList::with_capacity(aggregation_bits_len).unwrap();
@@ -15,7 +15,7 @@ pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs";
 /// `./keypairs.raw_keypairs`.
 pub fn keypairs_path() -> PathBuf {
     let dir = dirs::home_dir()
-        .and_then(|home| Some(home.join(".lighthouse")))
+        .map(|home| (home.join(".lighthouse")))
         .unwrap_or_else(|| PathBuf::from(""));
     dir.join(KEYPAIRS_FILE)
 }
@@ -42,7 +42,7 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
     /// If the file does not contain enough keypairs or is invalid.
     pub fn from_default_keypairs_file_if_exists(validator_count: usize, spec: &ChainSpec) -> Self {
         let dir = dirs::home_dir()
-            .and_then(|home| Some(home.join(".lighthouse")))
+            .map(|home| home.join(".lighthouse"))
             .unwrap_or_else(|| PathBuf::from(""));
         let file = dir.join(KEYPAIRS_FILE);

@@ -30,7 +30,7 @@ impl TestingProposerSlashingBuilder {
         let slot = Slot::new(0);
         let hash_1 = Hash256::from([1; 32]);
         let hash_2 = if test_task == ProposerSlashingTestTask::ProposalsIdentical {
-            hash_1.clone()
+            hash_1
         } else {
             Hash256::from([2; 32])
         };
@@ -99,9 +99,10 @@ impl AggregateSignature {
         for byte in bytes {
             if *byte != 0 {
                 let sig = RawAggregateSignature::from_bytes(&bytes).map_err(|_| {
-                    DecodeError::BytesInvalid(
-                        format!("Invalid AggregateSignature bytes: {:?}", bytes).to_string(),
-                    )
+                    DecodeError::BytesInvalid(format!(
+                        "Invalid AggregateSignature bytes: {:?}",
+                        bytes
+                    ))
                 })?;

                 return Ok(Self {
@@ -39,7 +39,7 @@ impl PublicKey {
     /// Converts compressed bytes to PublicKey
     pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
         let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| {
-            DecodeError::BytesInvalid(format!("Invalid PublicKey bytes: {:?}", bytes).to_string())
+            DecodeError::BytesInvalid(format!("Invalid PublicKey bytes: {:?}", bytes))
         })?;

         Ok(PublicKey(pubkey))
@@ -81,9 +81,7 @@ impl Signature {
         for byte in bytes {
             if *byte != 0 {
                 let raw_signature = RawSignature::from_bytes(&bytes).map_err(|_| {
-                    DecodeError::BytesInvalid(
-                        format!("Invalid Signature bytes: {:?}", bytes).to_string(),
-                    )
+                    DecodeError::BytesInvalid(format!("Invalid Signature bytes: {:?}", bytes))
                 })?;
                 return Ok(Signature {
                     signature: raw_signature,
@@ -158,17 +158,17 @@ fn aggregate_public_keys<'a>(public_keys: &'a [Cow<'a, G1Point>]) -> G1Point {
 }

 pub trait G1Ref {
-    fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point>;
+    fn g1_ref(&self) -> Cow<'_, G1Point>;
 }

 impl G1Ref for AggregatePublicKey {
-    fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point> {
+    fn g1_ref(&self) -> Cow<'_, G1Point> {
         Cow::Borrowed(&self.as_raw().point)
     }
 }

 impl G1Ref for PublicKey {
-    fn g1_ref<'a>(&'a self) -> Cow<'a, G1Point> {
+    fn g1_ref(&self) -> Cow<'_, G1Point> {
         Cow::Borrowed(&self.as_raw().point)
     }
 }
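The `G1Ref` rewrite is clippy's `needless_lifetimes`: the named `'a` adds nothing that lifetime elision doesn't already provide, and the anonymous `'_` in `Cow<'_, _>` keeps the borrow visible without the ceremony. The same trait shape with a toy point type:

    use std::borrow::Cow;

    #[derive(Clone)]
    struct Point(u64);

    trait PointRef {
        // Before (clippy::needless_lifetimes):
        //     fn point_ref<'a>(&'a self) -> Cow<'a, Point>;
        fn point_ref(&self) -> Cow<'_, Point>;
    }

    impl PointRef for Point {
        fn point_ref(&self) -> Cow<'_, Point> {
            Cow::Borrowed(self)
        }
    }

    fn main() {
        let p = Point(9);
        assert_eq!(p.point_ref().0, 9);
    }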
@@ -131,8 +131,8 @@ fn variable_list_h256_test<Len: Unsigned>(leaves_and_skips: Vec<(u64, bool)>) ->
     for (end, (_, update_cache)) in leaves_and_skips.into_iter().enumerate() {
         list = VariableList::new(leaves[..end].to_vec()).unwrap();

-        if update_cache {
-            if list
+        if update_cache
+            && list
                 .recalculate_tree_hash_root(&mut cache)
                 .unwrap()
                 .as_bytes()
@@ -141,7 +141,5 @@ fn variable_list_h256_test<Len: Unsigned>(leaves_and_skips: Vec<(u64, bool)>) ->
                 return false;
             }
         }
-        }
     }

     true
 }
@@ -58,7 +58,7 @@ mod tests {
         let spec = &E::default_spec();

         let keypair = generate_deterministic_keypair(42);
-        let deposit = get_deposit(keypair.clone(), spec);
+        let deposit = get_deposit(keypair, spec);

         let data = eth1_tx_data(&deposit).expect("should produce tx data");

@@ -123,8 +123,7 @@ fn string_to_bytes(string: &str) -> Result<Vec<u8>, String> {
 /// Uses this as reference:
 /// https://github.com/ethereum/eth2.0-pm/blob/9a9dbcd95e2b8e10287797bd768014ab3d842e99/interop/mocked_start/keygen_10_validators.yaml
 pub fn keypairs_from_yaml_file(path: PathBuf) -> Result<Vec<Keypair>, String> {
-    let file =
-        File::open(path.clone()).map_err(|e| format!("Unable to open YAML key file: {}", e))?;
+    let file = File::open(path).map_err(|e| format!("Unable to open YAML key file: {}", e))?;

     serde_yaml::from_reader::<_, Vec<YamlKeypair>>(file)
         .map_err(|e| format!("Could not parse YAML: {:?}", e))?
@@ -227,7 +227,7 @@ mod tests {
         let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
         let yaml_config = Some(YamlConfig::from_spec::<E>(spec));

-        do_test::<E>(boot_enr, genesis_state.clone(), yaml_config.clone());
+        do_test::<E>(boot_enr, genesis_state, yaml_config);
         do_test::<E>(None, None, None);
     }

@@ -237,13 +237,13 @@ mod tests {
         yaml_config: Option<YamlConfig>,
     ) {
         let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
-        let base_dir = PathBuf::from(temp_dir.path().join("my_testnet"));
+        let base_dir = temp_dir.path().join("my_testnet");
         let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
         let deposit_contract_deploy_block = 42;

         let testnet: Eth2TestnetConfig<E> = Eth2TestnetConfig {
-            deposit_contract_address: deposit_contract_address.clone(),
-            deposit_contract_deploy_block: deposit_contract_deploy_block,
+            deposit_contract_address,
+            deposit_contract_deploy_block,
             boot_enr,
             genesis_state,
             yaml_config,
@@ -1,3 +1,4 @@
+#![allow(clippy::needless_doctest_main)]
 //! A wrapper around the `prometheus` crate that provides a global, `lazy_static` metrics registry
 //! and functions to add and use the following components (more info at
 //! [Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)):
@@ -99,7 +99,7 @@ impl<'a> SszDecoderBuilder<'a> {
         let previous_offset = self
             .offsets
             .last()
-            .and_then(|o| Some(o.offset))
+            .map(|o| o.offset)
             .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET);

         if (previous_offset > offset) || (offset > self.bytes.len()) {
@@ -179,7 +179,7 @@ impl<'a> SszDecoderBuilder<'a> {
     /// b: Vec<u16>,
     /// }
     ///
-    /// fn main() {
+    /// fn ssz_decoding_example() {
     /// let foo = Foo {
     /// a: 42,
     /// b: vec![1, 3, 3, 7]
@@ -207,9 +207,10 @@ impl Decode for bool {
         match bytes[0] {
             0b0000_0000 => Ok(false),
             0b0000_0001 => Ok(true),
-            _ => Err(DecodeError::BytesInvalid(
-                format!("Out-of-range for boolean: {}", bytes[0]).to_string(),
-            )),
+            _ => Err(DecodeError::BytesInvalid(format!(
+                "Out-of-range for boolean: {}",
+                bytes[0]
+            ))),
         }
     }
 }
@@ -64,7 +64,7 @@ pub trait Encode {
     /// b: Vec<u16>,
     /// }
     ///
-    /// fn main() {
+    /// fn ssz_encode_example() {
     /// let foo = Foo {
     /// a: 42,
     /// b: vec![1, 3, 3, 7]
@@ -17,7 +17,7 @@
 //! b: Vec<u16>,
 //! }
 //!
-//! fn main() {
+//! fn ssz_encode_decode_example() {
 //! let foo = Foo {
 //! a: 42,
 //! b: vec![1, 3, 3, 7]
@@ -2,6 +2,7 @@ use ethereum_types::H256;
 use ssz::{Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};

+#[allow(clippy::zero_prefixed_literal)]
 mod round_trip {
     use super::*;

@@ -739,6 +739,7 @@ mod bitvector {
 }

 #[cfg(test)]
+#[allow(clippy::cognitive_complexity)]
 mod bitlist {
     use super::*;
     use crate::BitList;
@@ -937,7 +938,7 @@ mod bitlist {
     fn test_set_unset(num_bits: usize) {
         let mut bitfield = BitList1024::with_capacity(num_bits).unwrap();

-        for i in 0..num_bits + 1 {
+        for i in 0..=num_bits {
             if i < num_bits {
                 // Starts as false
                 assert_eq!(bitfield.get(i), Ok(false));
@@ -1023,10 +1024,7 @@ mod bitlist {
             vec![0b1111_1111, 0b0000_0000]
         );
         bitfield.set(8, true).unwrap();
-        assert_eq!(
-            bitfield.clone().into_raw_bytes(),
-            vec![0b1111_1111, 0b0000_0001]
-        );
+        assert_eq!(bitfield.into_raw_bytes(), vec![0b1111_1111, 0b0000_0001]);
     }

     #[test]
@@ -261,15 +261,15 @@ mod test {
     #[test]
     fn new() {
         let vec = vec![42; 5];
-        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec.clone());
+        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
         assert!(fixed.is_err());

         let vec = vec![42; 3];
-        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec.clone());
+        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
         assert!(fixed.is_err());

         let vec = vec![42; 4];
-        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec.clone());
+        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
         assert!(fixed.is_ok());
     }

@@ -299,7 +299,7 @@ mod test {
         assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]);

         let vec = vec![];
-        let fixed: FixedVector<u64, U4> = FixedVector::from(vec.clone());
+        let fixed: FixedVector<u64, U4> = FixedVector::from(vec);
         assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]);
     }

@@ -247,15 +247,15 @@ mod test {
     #[test]
     fn new() {
         let vec = vec![42; 5];
-        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec.clone());
+        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec);
         assert!(fixed.is_err());

         let vec = vec![42; 3];
-        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec.clone());
+        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec);
         assert!(fixed.is_ok());

         let vec = vec![42; 4];
-        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec.clone());
+        let fixed: Result<VariableList<u64, U4>, _> = VariableList::new(vec);
         assert!(fixed.is_ok());
     }

@@ -285,7 +285,7 @@ mod test {
         assert_eq!(&fixed[..], &vec![42, 42, 42][..]);

         let vec = vec![];
-        let fixed: VariableList<u64, U4> = VariableList::from(vec.clone());
+        let fixed: VariableList<u64, U4> = VariableList::from(vec);
         assert_eq!(&fixed[..], &vec![][..]);
     }

@@ -46,7 +46,7 @@ fn get_hashable_fields_and_their_caches<'a>(
 ///
 /// Return `Some(cache_field_name)` if the field has a cached tree hash attribute,
 /// or `None` otherwise.
-fn get_cache_field_for<'a>(field: &'a syn::Field) -> Option<syn::Ident> {
+fn get_cache_field_for(field: &syn::Field) -> Option<syn::Ident> {
     use syn::{MetaList, NestedMeta};

     let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs);
@@ -94,12 +94,12 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<

     info!("Writing config to {:?}", output_dir);

-    let mut spec = lighthouse_testnet_spec(env.core_context().eth2_config.spec.clone());
+    let mut spec = lighthouse_testnet_spec(env.core_context().eth2_config.spec);
     spec.min_genesis_time = min_genesis_time;
     spec.min_genesis_active_validator_count = min_genesis_active_validator_count;

     let testnet_config: Eth2TestnetConfig<T> = Eth2TestnetConfig {
-        deposit_contract_address: format!("{}", deposit_contract.address()),
+        deposit_contract_address: deposit_contract.address(),
         deposit_contract_deploy_block: deploy_block.as_u64(),
         boot_enr: None,
         genesis_state: None,
@@ -152,7 +152,7 @@ pub fn parse_password(matches: &ArgMatches) -> Result<Option<String>, String> {
         })
         .map(|password| {
             // Trim the linefeed from the end.
-            if password.ends_with("\n") {
+            if password.ends_with('\n') {
                 password[0..password.len() - 1].to_string()
             } else {
                 password
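`ends_with('\n')` instead of `ends_with("\n")` is clippy's `single_char_pattern`: a `char` pattern skips the substring machinery that a `&str` pattern brings along. The trimming logic from the hunk, runnable in isolation:

    fn trim_trailing_newline(password: String) -> String {
        // clippy::single_char_pattern prefers the char form of the pattern.
        if password.ends_with('\n') {
            password[0..password.len() - 1].to_string()
        } else {
            password
        }
    }

    fn main() {
        assert_eq!(trim_trailing_newline("hunter2\n".to_string()), "hunter2");
        assert_eq!(trim_trailing_newline("hunter2".to_string()), "hunter2");
        // A close one-liner equivalent (it strips repeated trailing newlines):
        assert_eq!("hunter2\n".trim_end_matches('\n'), "hunter2");
    }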
@@ -110,7 +110,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<

     env.runtime()
         .block_on(future)
-        .map_err(|()| format!("Failed to send transaction"))?;
+        .map_err(|()| "Failed to send transaction".to_string())?;

     Ok(())
 }
@@ -34,8 +34,8 @@ pub fn run_transition_blocks<T: EthSpec>(matches: &ArgMatches) -> Result<(), Str

     let post_state = do_transition(pre_state, block)?;

-    let mut output_file = File::create(output_path.clone())
-        .map_err(|e| format!("Unable to create output file: {:?}", e))?;
+    let mut output_file =
+        File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?;

     output_file
         .write_all(&post_state.as_ssz_bytes())
@@ -78,7 +78,7 @@ fn async_sim(

     let spec = &mut env.eth2_config.spec;

-    spec.milliseconds_per_slot = spec.milliseconds_per_slot / speed_up_factor;
+    spec.milliseconds_per_slot /= speed_up_factor;
     spec.eth1_follow_distance = 16;
     spec.seconds_per_day = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2;
     spec.min_genesis_time = 0;
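`x = x / y` collapses to `x /= y` under clippy's `assign_op_pattern`; the same group of hunks also swaps `format!` on a bare literal for `.to_string()`, which is `useless_format`. The compound-assignment form:

    fn main() {
        let speed_up_factor = 4u64;
        let mut milliseconds_per_slot = 12_000u64;

        // Before (clippy::assign_op_pattern):
        //     milliseconds_per_slot = milliseconds_per_slot / speed_up_factor;
        milliseconds_per_slot /= speed_up_factor;

        assert_eq!(milliseconds_per_slot, 3_000);
    }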
@@ -194,8 +194,9 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
             .into_iter()
             .for_each(|duty| {
                 if let Some(committee_index) = duty.attestation_committee_index {
-                    let validator_duties =
-                        committee_indices.entry(committee_index).or_insert(vec![]);
+                    let validator_duties = committee_indices
+                        .entry(committee_index)
+                        .or_insert_with(|| vec![]);

                     validator_duties.push(duty);
                 }
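`or_insert(vec![])` builds its default on every call, hit or miss; `or_insert_with` only invokes the closure when the entry is vacant. That is clippy's `or_fun_call` again, this time on the `HashMap` entry API. A runnable version of the grouping pattern, with simplified types and names:

    use std::collections::HashMap;

    fn main() {
        let duties = vec![(1u64, "duty a"), (2, "duty b"), (1, "duty c")];
        let mut committee_indices: HashMap<u64, Vec<&str>> = HashMap::new();

        for (committee_index, duty) in duties {
            committee_indices
                .entry(committee_index)
                // Before: .or_insert(vec![]) constructs the Vec even when the
                // key already exists. `|| vec![]` (as in the diff) or
                // `Vec::new` defers that to the vacant case.
                .or_insert_with(Vec::new)
                .push(duty);
        }

        assert_eq!(committee_indices[&1], vec!["duty a", "duty c"]);
        assert_eq!(committee_indices[&2], vec!["duty b"]);
    }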
@@ -443,7 +443,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
     /// Attempt to download the duties of all managed validators for the given `epoch`.
     fn update_epoch(self, epoch: Epoch) -> impl Future<Item = (), Error = String> {
         let service_1 = self.clone();
-        let service_2 = self.clone();
+        let service_2 = self;

         let pubkeys = service_1.validator_store.voting_pubkeys();
         service_1
@@ -384,12 +384,11 @@ mod tests {
             "withdrawal keypair should be as expected"
         );
         assert!(
-            created_dir
+            !created_dir
                 .deposit_data
                 .clone()
                 .expect("should have data")
-                .len()
-                > 0,
+                .is_empty(),
             "should have some deposit data"
         );

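The final hunk is clippy's `len_zero`: `len() > 0` becomes `!...is_empty()`, with the negation hoisted to the front of the whole method chain inside the `assert!`. Reduced to essentials:

    fn main() {
        let deposit_data: Option<Vec<u8>> = Some(vec![0xde, 0xad]);

        // Before (clippy::len_zero):
        //     assert!(deposit_data.clone().expect("should have data").len() > 0, ...);
        assert!(
            !deposit_data.clone().expect("should have data").is_empty(),
            "should have some deposit data"
        );
    }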