Optimizations, disable val client sync check & additional lcli tools (#834)

* Start adding interop genesis state to lcli

* Use more efficient method to generate genesis state

* Remove duplicate int_to_bytes32

* Add lcli command to change state genesis time

* Add option to allow VC to start with unsynced BN

* Set VC to do parallel key loading

* Don't default to dummy eth1 backend

* Add endpoint to dump operation pool

* Add metrics for op pool

* Remove state clone for slot notifier

* Add mem size approximation for tree hash cache

* Avoid cloning tree hash when getting head

* Fix failing API tests

* Address Michael's comments

* Add HashMap::from_par_iter
This commit is contained in:
Paul Hauner 2020-02-04 12:43:04 +11:00 committed by GitHub
parent eef56e77ef
commit f267bf2afe
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 479 additions and 122 deletions

6
Cargo.lock generated
View File

@ -2785,6 +2785,8 @@ dependencies = [
"int_to_bytes 0.1.0",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
"state_processing 0.1.0",
"types 0.1.0",
]
@ -3348,6 +3350,7 @@ dependencies = [
"eth2_ssz 0.1.2",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"operation_pool 0.1.0",
"proto_array_fork_choice 0.1.0",
"reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rest_api 0.1.0",
@ -3417,6 +3420,7 @@ dependencies = [
"lighthouse_metrics 0.1.0",
"network 0.1.0",
"node_test_rig 0.1.0",
"operation_pool 0.1.0",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"remote_beacon_node 0.1.0",
"serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3923,8 +3927,10 @@ dependencies = [
"bls 0.1.0",
"criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"eth2_hashing 0.1.1",
"eth2_ssz 0.1.2",
"eth2_ssz_types 0.2.0",
"int_to_bytes 0.1.0",
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -475,7 +475,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
.map(|v| v.clone())
.map(|v| v.clone_with_only_committee_caches())
}
/// Returns info representing the head block and state.

View File

@ -41,4 +41,13 @@ impl<E: EthSpec> CheckPoint<E> {
self.beacon_state = beacon_state;
self.beacon_state_root = beacon_state_root;
}
/// Returns a clone of `self` where the `beacon_state` retains only its committee caches
/// (see `BeaconState::clone_with_only_committee_caches`).
pub fn clone_with_only_committee_caches(&self) -> Self {
    let beacon_state = self.beacon_state.clone_with_only_committee_caches();

    Self {
        beacon_block: self.beacon_block.clone(),
        beacon_block_root: self.beacon_block_root,
        beacon_state_root: self.beacon_state_root,
        beacon_state,
    }
}
}

View File

@ -198,6 +198,18 @@ lazy_static! {
try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain");
pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result<IntGauge> =
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain");
/*
* Operation Pool
*/
pub static ref OP_POOL_NUM_ATTESTATIONS: Result<IntGauge> =
try_create_int_gauge("beacon_op_pool_attestations_total", "Count of attestations in the op pool");
pub static ref OP_POOL_NUM_ATTESTER_SLASHINGS: Result<IntGauge> =
try_create_int_gauge("beacon_op_pool_attester_slashings_total", "Count of attester slashings in the op pool");
pub static ref OP_POOL_NUM_PROPOSER_SLASHINGS: Result<IntGauge> =
try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool");
pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result<IntGauge> =
try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool");
}
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
@ -206,6 +218,23 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
if let Ok(head) = beacon_chain.head() {
scrape_head_state::<T>(&head.beacon_state, head.beacon_state_root)
}
set_gauge_by_usize(
&OP_POOL_NUM_ATTESTATIONS,
beacon_chain.op_pool.num_attestations(),
);
set_gauge_by_usize(
&OP_POOL_NUM_ATTESTER_SLASHINGS,
beacon_chain.op_pool.num_attester_slashings(),
);
set_gauge_by_usize(
&OP_POOL_NUM_PROPOSER_SLASHINGS,
beacon_chain.op_pool.num_proposer_slashings(),
);
set_gauge_by_usize(
&OP_POOL_NUM_VOLUNTARY_EXITS,
beacon_chain.op_pool.num_voluntary_exits(),
);
}
/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`.

View File

@ -70,14 +70,14 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
usize::max_value()
};
let head = beacon_chain.head()
let head_info = beacon_chain.head_info()
.map_err(|e| error!(
log,
"Failed to get beacon chain head";
"Failed to get beacon chain head info";
"error" => format!("{:?}", e)
))?;
let head_slot = head.beacon_block.slot;
let head_slot = head_info.slot;
let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch());
let current_slot = beacon_chain.slot().map_err(|e| {
error!(
@ -87,9 +87,9 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
)
})?;
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
let finalized_epoch = head.beacon_state.finalized_checkpoint.epoch;
let finalized_root = head.beacon_state.finalized_checkpoint.root;
let head_root = head.beacon_block_root;
let finalized_epoch = head_info.finalized_checkpoint.epoch;
let finalized_root = head_info.finalized_checkpoint.root;
let head_root = head_info.block_root;
let mut speedo = speedo.lock();
speedo.observe(head_slot, Instant::now());

View File

@ -1,6 +1,6 @@
use crate::DepositLog;
use eth2_hashing::hash;
use ssz_derive::{Decode, Encode};
use state_processing::common::DepositDataTree;
use std::cmp::Ordering;
use tree_hash::TreeHash;
use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH};
@ -31,56 +31,6 @@ pub enum Error {
InternalError(String),
}
/// Emulates the eth1 deposit contract merkle tree.
pub struct DepositDataTree {
tree: merkle_proof::MerkleTree,
mix_in_length: usize,
depth: usize,
}
impl DepositDataTree {
/// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth.
pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self {
Self {
tree: merkle_proof::MerkleTree::create(leaves, depth),
mix_in_length,
depth,
}
}
/// Returns 32 bytes representing the "mix in length" for the merkle root of this tree.
fn length_bytes(&self) -> Vec<u8> {
int_to_bytes32(self.mix_in_length)
}
/// Retrieve the root hash of this Merkle tree with the length mixed in.
pub fn root(&self) -> Hash256 {
let mut preimage = [0; 64];
preimage[0..32].copy_from_slice(&self.tree.hash()[..]);
preimage[32..64].copy_from_slice(&self.length_bytes());
Hash256::from_slice(&hash(&preimage))
}
/// Return the leaf at `index` and a Merkle proof of its inclusion.
///
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
/// and moving up the tree. Its length will be exactly equal to `depth + 1`.
pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
let (root, mut proof) = self.tree.generate_proof(index, self.depth);
proof.push(Hash256::from_slice(&self.length_bytes()));
(root, proof)
}
/// Add a deposit to the merkle tree.
pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), Error> {
self.tree
.push_leaf(leaf, self.depth)
.map_err(Error::DepositTreeError)?;
self.mix_in_length += 1;
Ok(())
}
}
#[derive(Encode, Decode, Clone)]
pub struct SszDepositCache {
logs: Vec<DepositLog>,
@ -202,7 +152,9 @@ impl DepositCache {
let deposit = Hash256::from_slice(&log.deposit_data.tree_hash_root());
self.leaves.push(deposit);
self.logs.push(log);
self.deposit_tree.push_leaf(deposit)?;
self.deposit_tree
.push_leaf(deposit)
.map_err(Error::DepositTreeError)?;
self.deposit_roots.push(self.deposit_tree.root());
Ok(())
}
@ -334,13 +286,6 @@ impl DepositCache {
}
}
/// Returns `int` as little-endian bytes, zero-padded to a length of 32.
fn int_to_bytes32(int: usize) -> Vec<u8> {
    // Start from a zeroed 32-byte buffer and copy the little-endian
    // representation of `int` into its low bytes.
    let mut bytes = vec![0u8; 32];
    let le = int.to_le_bytes();
    bytes[..le.len()].copy_from_slice(&le);
    bytes
}
#[cfg(test)]
pub mod tests {
use super::*;

View File

@ -34,6 +34,7 @@ slot_clock = { path = "../../eth2/utils/slot_clock" }
hex = "0.3"
parking_lot = "0.9"
futures = "0.1.29"
operation_pool = { path = "../../eth2/operation_pool" }
[dev-dependencies]
remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" }

View File

@ -2,6 +2,7 @@ use crate::response_builder::ResponseBuilder;
use crate::ApiResult;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use hyper::{Body, Request};
use operation_pool::PersistedOperationPool;
use std::sync::Arc;
/// Returns the `proto_array` fork choice struct, encoded as JSON.
@ -13,3 +14,15 @@ pub fn get_fork_choice<T: BeaconChainTypes>(
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&*beacon_chain.fork_choice.core_proto_array())
}
/// Returns the `PersistedOperationPool` struct.
///
/// Useful for debugging or advanced inspection of the stored operations.
pub fn get_operation_pool<T: BeaconChainTypes>(
    req: Request<Body>,
    beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
    // Build the responder first so a malformed request fails before we snapshot the pool.
    let responder = ResponseBuilder::new(&req)?;
    let pool = PersistedOperationPool::from_operation_pool(&beacon_chain.op_pool);
    responder.body(&pool)
}

View File

@ -137,16 +137,10 @@ pub fn state_at_slot<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
slot: Slot,
) -> Result<(Hash256, BeaconState<T::EthSpec>), ApiError> {
let head_state = &beacon_chain.head()?.beacon_state;
let head = beacon_chain.head()?;
if head_state.slot == slot {
// The request slot is the same as the best block (head) slot.
// I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary.
Ok((
beacon_chain.head()?.beacon_state_root,
beacon_chain.head()?.beacon_state,
))
if head.beacon_state.slot == slot {
Ok((head.beacon_state_root, head.beacon_state))
} else {
let root = state_root_at_slot(beacon_chain, slot)?;

View File

@ -151,6 +151,9 @@ pub fn route<T: BeaconChainTypes>(
(&Method::GET, "/advanced/fork_choice") => {
into_boxfut(advanced::get_fork_choice::<T>(req, beacon_chain))
}
(&Method::GET, "/advanced/operation_pool") => {
into_boxfut(advanced::get_operation_pool::<T>(req, beacon_chain))
}
(&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::<T>(
req,

View File

@ -156,6 +156,7 @@ fn return_validator_duties<T: BeaconChainTypes>(
let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch)
.map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?;
state.update_pubkey_cache()?;
state
.build_committee_cache(relative_epoch, &beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;

View File

@ -6,7 +6,8 @@ use node_test_rig::{
testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode,
};
use remote_beacon_node::{
Committee, HeadBeaconBlock, PublishStatus, ValidatorDuty, ValidatorResponse,
Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorDuty,
ValidatorResponse,
};
use std::convert::TryInto;
use std::sync::Arc;
@ -237,10 +238,12 @@ fn check_duties<T: BeaconChainTypes>(
"there should be a duty for each validator"
);
let state = beacon_chain
let mut state = beacon_chain
.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
.expect("should get state at slot");
state.build_all_caches(spec).expect("should build caches");
validators
.iter()
.zip(duties.iter())
@ -816,6 +819,29 @@ fn get_fork_choice() {
);
}
/// HTTP API test: the `/advanced/operation_pool` endpoint should return a
/// `PersistedOperationPool` identical to one built directly from the node's op pool.
#[test]
fn get_operation_pool() {
    let mut env = build_env();

    let node = build_node(&mut env, testing_client_config());
    let remote_node = node.remote_node().expect("should produce remote node");

    let result = env
        .runtime()
        .block_on(remote_node.http.advanced().get_operation_pool())
        // Fixed copy-paste from the `get_fork_choice` test: this call fetches the
        // operation pool, not fork choice.
        .expect("should not error when getting operation pool");

    let expected = PersistedOperationPool::from_operation_pool(
        &node
            .client
            .beacon_chain()
            .expect("node should have chain")
            .op_pool,
    );

    assert_eq!(result, expected, "result should be as expected");
}
fn compare_validator_response<T: EthSpec>(
state: &BeaconState<T>,
response: &ValidatorResponse,

View File

@ -377,7 +377,6 @@ fn init_new_client<E: EthSpec>(
eth2_testnet_config.deposit_contract_deploy_block;
client_config.eth1.follow_distance = spec.eth1_follow_distance / 2;
client_config.dummy_eth1_backend = false;
client_config.eth1.lowest_cached_block_number = client_config
.eth1
.deposit_contract_deploy_block

View File

@ -11,6 +11,8 @@ types = { path = "../types" }
state_processing = { path = "../state_processing" }
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
serde = "1.0.102"
serde_derive = "1.0.102"
[dev-dependencies]
rand = "0.7.2"

View File

@ -1,10 +1,13 @@
use int_to_bytes::int_to_bytes8;
use serde_derive::{Deserialize, Serialize};
use ssz::ssz_encode;
use ssz_derive::{Decode, Encode};
use types::{AttestationData, BeaconState, ChainSpec, Domain, Epoch, EthSpec};
/// Serialized `AttestationData` augmented with a domain to encode the fork info.
#[derive(PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode)]
#[derive(
PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize,
)]
pub struct AttestationId {
v: Vec<u8>,
}

View File

@ -284,6 +284,16 @@ impl<T: EthSpec> OperationPool<T> {
});
}
/// Total number of attester slashings in the pool.
pub fn num_attester_slashings(&self) -> usize {
    let slashings = self.attester_slashings.read();
    slashings.len()
}

/// Total number of proposer slashings in the pool.
pub fn num_proposer_slashings(&self) -> usize {
    let slashings = self.proposer_slashings.read();
    slashings.len()
}
/// Insert a voluntary exit, validating it almost-entirely (future exits are permitted).
pub fn insert_voluntary_exit(
&self,
@ -327,6 +337,11 @@ impl<T: EthSpec> OperationPool<T> {
self.prune_attester_slashings(finalized_state, spec);
self.prune_voluntary_exits(finalized_state);
}
/// Total number of voluntary exits in the pool.
pub fn num_voluntary_exits(&self) -> usize {
    let exits = self.voluntary_exits.read();
    exits.len()
}
}
/// Filter up to a maximum number of operations out of an iterator.

View File

@ -1,6 +1,7 @@
use crate::attestation_id::AttestationId;
use crate::OperationPool;
use parking_lot::RwLock;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use types::*;
@ -8,7 +9,8 @@ use types::*;
///
/// Operations are stored in arbitrary order, so it's not a good idea to compare instances
/// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first.
#[derive(Clone, Encode, Decode)]
#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
#[serde(bound = "T: EthSpec")]
pub struct PersistedOperationPool<T: EthSpec> {
/// Mapping from attestation ID to attestation mappings.
// We could save space by not storing the attestation ID, but it might

View File

@ -31,6 +31,8 @@ tree_hash = "0.1.0"
tree_hash_derive = "0.2"
types = { path = "../types" }
rayon = "1.2.0"
eth2_hashing = { path = "../utils/eth2_hashing" }
int_to_bytes = { path = "../utils/int_to_bytes" }
[features]
fake_crypto = ["bls/fake_crypto"]

View File

@ -0,0 +1,52 @@
use eth2_hashing::hash;
use int_to_bytes::int_to_bytes32;
use merkle_proof::{MerkleTree, MerkleTreeError};
use types::Hash256;
/// Emulates the eth1 deposit contract merkle tree.
pub struct DepositDataTree {
    tree: MerkleTree,
    mix_in_length: usize,
    depth: usize,
}

impl DepositDataTree {
    /// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth.
    pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self {
        let tree = MerkleTree::create(leaves, depth);
        Self {
            tree,
            mix_in_length,
            depth,
        }
    }

    /// Returns 32 bytes representing the "mix in length" for the merkle root of this tree.
    fn length_bytes(&self) -> Vec<u8> {
        int_to_bytes32(self.mix_in_length as u64)
    }

    /// Retrieve the root hash of this Merkle tree with the length mixed in.
    pub fn root(&self) -> Hash256 {
        // Preimage is `tree_root || mix_in_length`, each 32 bytes.
        let mut preimage = [0; 64];
        {
            let (root_half, length_half) = preimage.split_at_mut(32);
            root_half.copy_from_slice(&self.tree.hash()[..]);
            length_half.copy_from_slice(&self.length_bytes());
        }
        Hash256::from_slice(&hash(&preimage))
    }

    /// Return the leaf at `index` and a Merkle proof of its inclusion.
    ///
    /// The Merkle proof is in "bottom-up" order, starting with a leaf node
    /// and moving up the tree. Its length will be exactly equal to `depth + 1`.
    pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
        let (leaf, mut branch) = self.tree.generate_proof(index, self.depth);
        // The mixed-in length forms the final element of the proof.
        branch.push(Hash256::from_slice(&self.length_bytes()));
        (leaf, branch)
    }

    /// Add a deposit to the merkle tree.
    pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> {
        self.tree.push_leaf(leaf, self.depth)?;
        self.mix_in_length += 1;
        Ok(())
    }
}

View File

@ -1,9 +1,11 @@
mod deposit_data_tree;
mod get_attesting_indices;
mod get_base_reward;
mod get_indexed_attestation;
mod initiate_validator_exit;
mod slash_validator;
pub use deposit_data_tree::DepositDataTree;
pub use get_attesting_indices::get_attesting_indices;
pub use get_base_reward::get_base_reward;
pub use get_indexed_attestation::get_indexed_attestation;

View File

@ -1,6 +1,7 @@
use super::per_block_processing::{errors::BlockProcessingError, process_deposit};
use crate::common::DepositDataTree;
use tree_hash::TreeHash;
use types::typenum::U4294967296;
use types::DEPOSIT_TREE_DEPTH;
use types::*;
/// Initialize a `BeaconState` from genesis data.
@ -26,14 +27,13 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
// Seed RANDAO with Eth1 entropy
state.fill_randao_mixes_with(eth1_block_hash);
// Process deposits
let leaves: Vec<_> = deposits
.iter()
.map(|deposit| deposit.data.clone())
.collect();
for (index, deposit) in deposits.into_iter().enumerate() {
let deposit_data_list = VariableList::<_, U4294967296>::from(leaves[..=index].to_vec());
state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root());
let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
for deposit in deposits.iter() {
deposit_tree
.push_leaf(Hash256::from_slice(&deposit.data.tree_hash_root()))
.map_err(BlockProcessingError::MerkleTreeError)?;
state.eth1_data.deposit_root = deposit_tree.root();
process_deposit(&mut state, &deposit, spec, true)?;
}

View File

@ -1,4 +1,5 @@
use super::signature_sets::Error as SignatureSetError;
use merkle_proof::MerkleTreeError;
use types::*;
/// The error returned from the `per_block_processing` function. Indicates that a block is either
@ -46,6 +47,7 @@ pub enum BlockProcessingError {
BeaconStateError(BeaconStateError),
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
MerkleTreeError(MerkleTreeError),
}
impl From<BeaconStateError> for BlockProcessingError {

View File

@ -97,6 +97,21 @@ impl BeaconTreeHashCache {
pub fn is_initialized(&self) -> bool {
self.initialized
}
/// Returns the approximate size of the cache in bytes.
///
/// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers.
/// We focus instead on the lists of hashes, which should massively outweigh the items that we
/// ignore.
pub fn approx_mem_size(&self) -> usize {
    let mut total = 0;
    total += self.block_roots.approx_mem_size();
    total += self.state_roots.approx_mem_size();
    total += self.historical_roots.approx_mem_size();
    total += self.validators.approx_mem_size();
    total += self.balances.approx_mem_size();
    total += self.randao_mixes.approx_mem_size();
    total += self.slashings.approx_mem_size();
    total
}
}
/// The state of the `BeaconChain` at some slot.
@ -996,6 +1011,12 @@ impl<T: EthSpec> BeaconState<T> {
tree_hash_cache: BeaconTreeHashCache::default(),
}
}
pub fn clone_with_only_committee_caches(&self) -> Self {
let mut state = self.clone_without_caches();
state.committee_caches = self.committee_caches.clone();
state
}
}
impl From<RelativeEpochError> for Error {

View File

@ -127,6 +127,15 @@ impl TreeHashCache {
pub fn leaves(&mut self) -> &mut Vec<Hash256> {
&mut self.layers[self.depth]
}
/// Returns the approximate size of the cache in bytes.
///
/// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers.
/// We focus instead on the lists of hashes, which should massively outweigh the items that we
/// ignore.
pub fn approx_mem_size(&self) -> usize {
    // Each hash is 32 bytes; sum the hash counts across every layer.
    self.layers
        .iter()
        .fold(0, |bytes, layer| bytes + layer.len() * 32)
}
}
/// Compute the dirty indices for one layer up.

View File

@ -16,6 +16,22 @@ pub struct MultiTreeHashCache {
value_caches: Vec<TreeHashCache>,
}
impl MultiTreeHashCache {
    /// Returns the approximate size of the cache in bytes.
    ///
    /// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers.
    /// We focus instead on the lists of hashes, which should massively outweigh the items that we
    /// ignore.
    pub fn approx_mem_size(&self) -> usize {
        let values_size: usize = self
            .value_caches
            .iter()
            .map(TreeHashCache::approx_mem_size)
            .sum();
        self.list_cache.approx_mem_size() + values_size
    }
}
impl<T, N> CachedTreeHash<MultiTreeHashCache> for VariableList<T, N>
where
T: CachedTreeHash<TreeHashCache>,

View File

@ -28,7 +28,7 @@ pub enum MerkleTree {
Zero(usize),
}
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum MerkleTreeError {
// Trying to push in a leaf
LeafReached,

View File

@ -18,3 +18,4 @@ eth2_ssz = { path = "../../../eth2/utils/ssz" }
serde_json = "^1.0"
eth2_config = { path = "../../../eth2/utils/eth2_config" }
proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" }
operation_pool = { path = "../../../eth2/operation_pool" }

View File

@ -5,7 +5,6 @@
use eth2_config::Eth2Config;
use futures::{future, Future, IntoFuture};
use proto_array_fork_choice::core::ProtoArray;
use reqwest::{
r#async::{Client, ClientBuilder, Response},
StatusCode,
@ -20,6 +19,8 @@ use types::{
};
use url::Url;
pub use operation_pool::PersistedOperationPool;
pub use proto_array_fork_choice::core::ProtoArray;
pub use rest_api::{
CanonicalHeadResponse, Committee, HeadBeaconBlock, ValidatorDutiesRequest, ValidatorDuty,
ValidatorRequest, ValidatorResponse,
@ -560,6 +561,16 @@ impl<E: EthSpec> Advanced<E> {
.into_future()
.and_then(move |url| client.json_get(url, vec![]))
}
/// Gets the core `PersistedOperationPool` struct from the node.
pub fn get_operation_pool(
    &self,
) -> impl Future<Item = PersistedOperationPool<E>, Error = Error> {
    let client = self.0.clone();
    let url = self.url("operation_pool");
    url.into_future()
        .and_then(move |url| client.json_get(url, vec![]))
}
}
#[derive(Deserialize)]

View File

@ -0,0 +1,40 @@
use clap::ArgMatches;
use ssz::{Decode, Encode};
use std::fs::File;
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{BeaconState, EthSpec};
/// Loads an SSZ-encoded `BeaconState` from `--ssz-state`, overwrites its `genesis_time`
/// with `--genesis-time`, then writes the re-encoded state back to the same file.
pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
    let path = matches
        .value_of("ssz-state")
        .ok_or_else(|| "ssz-state not specified")?
        .parse::<PathBuf>()
        .map_err(|e| format!("Unable to parse ssz-state: {}", e))?;

    let genesis_time = matches
        .value_of("genesis-time")
        .ok_or_else(|| "genesis-time not specified")?
        .parse::<u64>()
        .map_err(|e| format!("Unable to parse genesis-time: {}", e))?;

    // Read and decode the existing state.
    let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?;
    let mut ssz = vec![];
    file.read_to_end(&mut ssz)
        .map_err(|e| format!("Unable to read file: {}", e))?;
    // Close the read handle before re-creating the file for writing.
    drop(file);

    let mut state = BeaconState::<T>::from_ssz_bytes(&ssz)
        .map_err(|e| format!("Unable to decode SSZ: {:?}", e))?;

    state.genesis_time = genesis_time;

    // Truncate and rewrite the file in place.
    File::create(path)
        .map_err(|e| format!("Unable to create file: {}", e))?
        .write_all(&state.as_ssz_bytes())
        .map_err(|e| format!("Unable to write to file: {}", e))?;

    Ok(())
}

View File

@ -0,0 +1,65 @@
use clap::ArgMatches;
use environment::Environment;
use eth2_testnet_config::Eth2TestnetConfig;
use genesis::interop_genesis_state;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
use types::{test_utils::generate_deterministic_keypairs, Epoch, EthSpec, Fork};
/// Produces an interop-compatible genesis `BeaconState` from deterministic keypairs and writes
/// it into the testnet directory's `Eth2TestnetConfig` via `force_write_to_file`.
pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
    // Number of deterministic validators to include in the genesis state.
    let validator_count = matches
        .value_of("validator-count")
        .ok_or_else(|| "validator-count not specified")?
        .parse::<usize>()
        .map_err(|e| format!("Unable to parse validator-count: {}", e))?;

    // `genesis-time` is optional; default to the current UNIX time in seconds.
    let genesis_time = if let Some(genesis_time) = matches.value_of("genesis-time") {
        genesis_time
            .parse::<u64>()
            .map_err(|e| format!("Unable to parse genesis-time: {}", e))?
    } else {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_err(|e| format!("Unable to get time: {:?}", e))?
            .as_secs()
    };

    // `testnet-dir` is optional; default to `~/.lighthouse/testnet`. An unparseable value
    // silently falls back to the default (both error arms map to `()`).
    let testnet_dir = matches
        .value_of("testnet-dir")
        .ok_or_else(|| ())
        .and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ()))
        .unwrap_or_else(|_| {
            dirs::home_dir()
                .map(|home| home.join(".lighthouse").join("testnet"))
                .expect("should locate home directory")
        });

    let mut eth2_testnet_config: Eth2TestnetConfig<T> =
        Eth2TestnetConfig::load(testnet_dir.clone())?;

    // Layer the testnet's YAML config over the environment's base spec, failing if they
    // are incompatible with the compiled spec constants.
    let mut spec = eth2_testnet_config
        .yaml_config
        .as_ref()
        .ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
        .apply_to_chain_spec::<T>(&env.core_context().eth2_config.spec)
        .ok_or_else(|| {
            format!(
                "The loaded config is not compatible with the {} spec",
                &env.core_context().eth2_config.spec_constants
            )
        })?;

    // NOTE(review): `[1, 3, 3, 7]` reads as a deliberate "1337" interop marker version —
    // confirm this is the intended genesis fork for interop testnets.
    spec.genesis_fork = Fork {
        previous_version: [0, 0, 0, 0],
        current_version: [1, 3, 3, 7],
        epoch: Epoch::new(0),
    };

    let keypairs = generate_deterministic_keypairs(validator_count);
    let genesis_state = interop_genesis_state(&keypairs, genesis_time, &spec)?;

    eth2_testnet_config.genesis_state = Some(genesis_state);
    eth2_testnet_config.force_write_to_file(testnet_dir)?;

    Ok(())
}

View File

@ -1,8 +1,10 @@
#[macro_use]
extern crate log;
mod change_genesis_time;
mod deploy_deposit_contract;
mod eth1_genesis;
mod interop_genesis;
mod parse_hex;
mod refund_deposit_contract;
mod transition_blocks;
@ -226,6 +228,59 @@ fn main() {
.help("The URL to the eth1 JSON-RPC http API."),
)
)
.subcommand(
SubCommand::with_name("interop-genesis")
.about(
"Produces an interop-compatible genesis state using deterministic keypairs",
)
.arg(
Arg::with_name("testnet-dir")
.short("d")
.long("testnet-dir")
.value_name("PATH")
.takes_value(true)
.help("The testnet dir. Defaults to ~/.lighthouse/testnet"),
)
.arg(
Arg::with_name("validator-count")
.long("validator-count")
.index(1)
.value_name("INTEGER")
.takes_value(true)
.default_value("1024")
.help("The number of validators in the genesis state."),
)
.arg(
Arg::with_name("genesis-time")
.long("genesis-time")
.short("t")
.value_name("UNIX_EPOCH")
.takes_value(true)
.help("The value for state.genesis_time. Defaults to now."),
)
)
.subcommand(
SubCommand::with_name("change-genesis-time")
.about(
"Loads a file with an SSZ-encoded BeaconState and modifies the genesis time.",
)
.arg(
Arg::with_name("ssz-state")
.index(1)
.value_name("PATH")
.takes_value(true)
.required(true)
.help("The path to the SSZ file"),
)
.arg(
Arg::with_name("genesis-time")
.index(2)
.value_name("UNIX_EPOCH")
.takes_value(true)
.required(true)
.help("The value for state.genesis_time."),
)
)
.get_matches();
macro_rules! run_with_spec {
@ -306,6 +361,10 @@ fn run<T: EthSpec>(env_builder: EnvironmentBuilder<T>, matches: &ArgMatches) {
}
("eth1-genesis", Some(matches)) => eth1_genesis::run::<T>(env, matches)
.unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)),
("interop-genesis", Some(matches)) => interop_genesis::run::<T>(env, matches)
.unwrap_or_else(|e| error!("Failed to run interop-genesis command: {}", e)),
("change-genesis-time", Some(matches)) => change_genesis_time::run::<T>(matches)
.unwrap_or_else(|e| error!("Failed to run change-genesis-time command: {}", e)),
(other, _) => error!("Unknown subcommand {}. See --help.", other),
}
}

View File

@ -14,6 +14,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.default_value(&DEFAULT_HTTP_SERVER)
.takes_value(true),
)
.arg(
Arg::with_name("allow-unsynced")
.long("allow-unsynced")
.help("If present, the validator client will still poll for duties if the beacon
node is not synced.")
)
/*
* The "testnet" sub-command.
*

View File

@ -32,6 +32,9 @@ pub struct Config {
///
/// Should be similar to `http://localhost:8080`
pub http_server: String,
/// If true, the validator client will still poll for duties and produce blocks even if the
/// beacon node is not synced at startup.
pub allow_unsynced_beacon_node: bool,
}
impl Default for Config {
@ -44,6 +47,7 @@ impl Default for Config {
data_dir,
key_source: <_>::default(),
http_server: DEFAULT_HTTP_SERVER.to_string(),
allow_unsynced_beacon_node: false,
}
}
}
@ -71,7 +75,7 @@ impl Config {
config.http_server = server.to_string();
}
let config = match cli_args.subcommand() {
let mut config = match cli_args.subcommand() {
("testnet", Some(sub_cli_args)) => {
if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") {
return Err(
@ -88,6 +92,8 @@ impl Config {
}
};
config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced");
Ok(config)
}
}

View File

@ -190,6 +190,7 @@ pub struct DutiesServiceBuilder<T, E: EthSpec> {
slot_clock: Option<T>,
beacon_node: Option<RemoteBeaconNode<E>>,
context: Option<RuntimeContext<E>>,
allow_unsynced_beacon_node: bool,
}
impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
@ -199,6 +200,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
slot_clock: None,
beacon_node: None,
context: None,
allow_unsynced_beacon_node: false,
}
}
@ -222,6 +224,12 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
self
}
/// Set to `true` to allow polling for duties when the beacon node is not synced.
///
/// Defaults to `false` (the service refuses to poll an unsynced node).
pub fn allow_unsynced_beacon_node(mut self, allow_unsynced_beacon_node: bool) -> Self {
    self.allow_unsynced_beacon_node = allow_unsynced_beacon_node;
    self
}
pub fn build(self) -> Result<DutiesService<T, E>, String> {
Ok(DutiesService {
inner: Arc::new(Inner {
@ -238,6 +246,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
context: self
.context
.ok_or_else(|| "Cannot build DutiesService without runtime_context")?,
allow_unsynced_beacon_node: self.allow_unsynced_beacon_node,
}),
})
}
@ -250,6 +259,9 @@ pub struct Inner<T, E: EthSpec> {
pub(crate) slot_clock: T,
beacon_node: RemoteBeaconNode<E>,
context: RuntimeContext<E>,
/// If true, the duties service will poll for duties from the beacon node even if it is not
/// synced.
allow_unsynced_beacon_node: bool,
}
/// Maintains a store of the duties for all voting validators in the `validator_store`.
@ -404,8 +416,10 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
.and_then(move |(current_epoch, beacon_head_epoch)| {
let log = service_3.context.log.clone();
let future: Box<dyn Future<Item = (), Error = ()> + Send> =
if beacon_head_epoch + 1 < current_epoch {
let future: Box<dyn Future<Item = (), Error = ()> + Send> = if beacon_head_epoch + 1
< current_epoch
&& !service_3.allow_unsynced_beacon_node
{
error!(
log,
"Beacon node is not synced";

View File

@ -210,6 +210,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
.validator_store(validator_store.clone())
.beacon_node(beacon_node.clone())
.runtime_context(context.service_context("duties".into()))
.allow_unsynced_beacon_node(config.allow_unsynced_beacon_node)
.build()?;
let block_service = BlockServiceBuilder::new()

View File

@ -33,8 +33,10 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
fork_service: ForkService<T, E>,
log: Logger,
) -> Result<Self, String> {
let validator_iter = read_dir(&base_dir)
let validator_key_values = read_dir(&base_dir)
.map_err(|e| format!("Failed to read base directory {:?}: {:?}", base_dir, e))?
.collect::<Vec<_>>()
.into_par_iter()
.filter_map(|validator_dir| {
let path = validator_dir.ok()?.path();
@ -63,7 +65,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
});
Ok(Self {
validators: Arc::new(RwLock::new(HashMap::from_iter(validator_iter))),
validators: Arc::new(RwLock::new(HashMap::from_par_iter(validator_key_values))),
spec: Arc::new(spec),
log,
temp_dir: None,