use environment::RuntimeContext;
use exit_future::Signal;
use futures::{Future, Stream};
use parking_lot::RwLock;
use remote_beacon_node::RemoteBeaconNode;
use slog::{crit, info, trace};
use slot_clock::SlotClock;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, EthSpec, Fork};

/// Delay this period of time after the slot starts. This allows the node to process the new slot.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80);

/// Builds a `ForkService`.
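///
/// # Example
///
/// A minimal sketch of the intended call pattern (illustrative only; the
/// `slot_clock`, `beacon_node` and `context` bindings are assumed to exist
/// in the caller):
///
/// ```ignore
/// let fork_service = ForkServiceBuilder::new()
///     .slot_clock(slot_clock)
///     .beacon_node(beacon_node)
///     .runtime_context(context)
///     .build()?;
/// ```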
pub struct ForkServiceBuilder<T, E: EthSpec> {
    fork: Option<Fork>,
    slot_clock: Option<T>,
    beacon_node: Option<RemoteBeaconNode<E>>,
    context: Option<RuntimeContext<E>>,
}

impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> {
    pub fn new() -> Self {
        Self {
            fork: None,
            slot_clock: None,
            beacon_node: None,
            context: None,
        }
    }

    pub fn slot_clock(mut self, slot_clock: T) -> Self {
        self.slot_clock = Some(slot_clock);
        self
    }

    pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self {
        self.beacon_node = Some(beacon_node);
        self
    }

    pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
        self.context = Some(context);
        self
    }

    pub fn build(self) -> Result<ForkService<T, E>, String> {
        Ok(ForkService {
            inner: Arc::new(Inner {
                fork: RwLock::new(self.fork),
                slot_clock: self
                    .slot_clock
                    .ok_or_else(|| "Cannot build ForkService without slot_clock")?,
                beacon_node: self
                    .beacon_node
                    .ok_or_else(|| "Cannot build ForkService without beacon_node")?,
                context: self
                    .context
                    .ok_or_else(|| "Cannot build ForkService without runtime_context")?,
            }),
        })
    }
}

/// Helper to minimise `Arc` usage.
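///
/// All fields live behind a single `Arc` so that cloning a `ForkService`
/// bumps one reference count instead of cloning every field.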
pub struct Inner<T, E: EthSpec> {
    fork: RwLock<Option<Fork>>,
    beacon_node: RemoteBeaconNode<E>,
    context: RuntimeContext<E>,
    slot_clock: T,
}

/// Attempts to download the `Fork` struct from the beacon node at the start of each epoch.
pub struct ForkService<T, E: EthSpec> {
    inner: Arc<Inner<T, E>>,
}

impl<T, E: EthSpec> Clone for ForkService<T, E> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

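// Deref to `Inner` so methods on the service can write `self.fork`,
// `self.slot_clock`, etc. instead of going through `self.inner`.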
impl<T, E: EthSpec> Deref for ForkService<T, E> {
    type Target = Inner<T, E>;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
    /// Returns the last fork downloaded from the beacon node, if any.
    pub fn fork(&self) -> Option<Fork> {
        self.fork.read().clone()
    }

    /// Starts the service that periodically polls for the `Fork`.
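    ///
    /// Returns an `exit_future::Signal` that can be used to stop the update
    /// task. A minimal usage sketch (illustrative only; `fork_service` and
    /// `spec` are assumed to be in scope, inside a running tokio runtime):
    ///
    /// ```ignore
    /// let exit_signal = fork_service.start_update_service(&spec)?;
    /// // ... later, fire (or drop) the signal to stop the service.
    /// ```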
    pub fn start_update_service(&self, spec: &ChainSpec) -> Result<Signal, String> {
        let log = self.context.log.clone();

        let duration_to_next_epoch = self
            .slot_clock
            .duration_to_next_epoch(E::slots_per_epoch())
            .ok_or_else(|| "Unable to determine duration to next epoch".to_string())?;

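        // Tick once per epoch, starting at the next epoch boundary plus
        // `TIME_DELAY_FROM_SLOT` so the node has had time to process the new
        // slot.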
        let interval = {
            let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
            Interval::new(
                Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT,
                slot_duration * E::slots_per_epoch() as u32,
            )
        };

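        // `exit_fut` resolves when `exit_signal` is triggered; `until` uses it
        // below to tear the interval task down on shutdown.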
        let (exit_signal, exit_fut) = exit_future::signal();
        let service = self.clone();
        let log_1 = log.clone();
        let log_2 = log.clone();

        // Run an immediate update before starting the updater service.
        self.context.executor.spawn(service.clone().do_update());

        self.context.executor.spawn(
            exit_fut
                .until(
                    interval
                        .map_err(move |e| {
                            crit! {
                                log_1,
                                "Timer thread failed";
                                "error" => format!("{}", e)
                            }
                        })
                        .for_each(move |_| service.do_update())
                        // Prevent any errors from escaping and stopping the interval.
                        .then(|_| Ok(())),
                )
                .map(move |_| info!(log_2, "Shutdown complete")),
        );

        Ok(exit_signal)
    }

    /// Attempts to download the `Fork` from the server.
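    ///
    /// On success the new `Fork` replaces the cached value; on failure the
    /// previous value is retained and the error is only logged.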
    fn do_update(&self) -> impl Future<Item = (), Error = ()> {
        let service_1 = self.clone();
        let log_1 = service_1.context.log.clone();
        let log_2 = service_1.context.log.clone();

        self.inner
            .beacon_node
            .http
            .beacon()
            .get_fork()
            .map(move |fork| *(service_1.fork.write()) = Some(fork))
            .map(move |_| trace!(log_1, "Fork update success"))
            .map_err(move |e| {
                trace!(
                    log_2,
                    "Fork update failed";
                    "error" => format!("Error retrieving fork: {:?}", e)
                )
            })
            // Returning an error would stop the interval, which is not desired:
            // a single failure should not prevent all future attempts.
            .then(|_| Ok(()))
    }
}