Restructure of validator client service and block producer
commit 9cdb7bb716 (parent eea772de3e)
@@ -2,12 +2,12 @@ use super::EpochDuties;

 use types::{Epoch, PublicKey};

 #[derive(Debug, PartialEq, Clone)]
-pub enum BeaconNodeError {
+pub enum BeaconNodeDutiesError {
     RemoteFailure(String),
 }

 /// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
-pub trait BeaconNode: Send + Sync {
+pub trait BeaconNodeDuties: Send + Sync {
     /// Gets the duties for all validators.
     ///
     /// Returns a vector of EpochDuties for each validator public key. The entry will be None for

@@ -16,5 +16,5 @@ pub trait BeaconNode: Send + Sync {
         &self,
         epoch: Epoch,
         pubkeys: &[PublicKey],
-    ) -> Result<EpochDuties, BeaconNodeError>;
+    ) -> Result<EpochDuties, BeaconNodeDutiesError>;
 }
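For orientation, the renamed trait assembles from the two hunks above to roughly the following (lines the diff does not show are elided):

```rust
use super::EpochDuties;
use types::{Epoch, PublicKey};

#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeDutiesError {
    RemoteFailure(String),
}

/// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
pub trait BeaconNodeDuties: Send + Sync {
    /// Gets the duties for all validators.
    ///
    /// Returns a vector of EpochDuties for each validator public key. The entry will be None for
    /// ... (remainder elided by the hunk boundary)
    fn request_duties(
        &self,
        epoch: Epoch,
        pubkeys: &[PublicKey],
    ) -> Result<EpochDuties, BeaconNodeDutiesError>;
}
```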
@@ -1,5 +1,5 @@
+use super::beacon_node_duties::{BeaconNodeDuties, BeaconNodeDutiesError};
 use super::epoch_duties::{EpochDuties, EpochDuty};
-use super::traits::{BeaconNode, BeaconNodeError};
 use grpcio::CallOption;
 use protos::services::{GetDutiesRequest, Validators};
 use protos::services_grpc::ValidatorServiceClient;

@@ -8,13 +8,13 @@ use std::collections::HashMap;
 use std::time::Duration;
 use types::{Epoch, PublicKey, Slot};

-impl BeaconNode for ValidatorServiceClient {
+impl BeaconNodeDuties for ValidatorServiceClient {
     /// Requests all duties (block signing and committee attesting) from the Beacon Node (BN).
     fn request_duties(
         &self,
         epoch: Epoch,
         pubkeys: &[PublicKey],
-    ) -> Result<EpochDuties, BeaconNodeError> {
+    ) -> Result<EpochDuties, BeaconNodeDutiesError> {
         // Get the required duties from all validators
         // build the request
         let mut req = GetDutiesRequest::new();

@@ -29,7 +29,7 @@ impl BeaconNode for ValidatorServiceClient {
         // send the request, get the duties reply
         let reply = self
             .get_validator_duties(&req)
-            .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
+            .map_err(|err| BeaconNodeDutiesError::RemoteFailure(format!("{:?}", err)))?;

         let mut epoch_duties: HashMap<PublicKey, Option<EpochDuty>> = HashMap::new();
         for (index, validator_duty) in reply.get_active_validators().iter().enumerate() {
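Since the trait now lives in its own module, any type, not just the gRPC `ValidatorServiceClient`, can satisfy it. When the TODO'd tests are reintroduced, a stub like the following could stand in for a node; `FailingDutiesNode` is hypothetical and not part of this commit:

```rust
use types::{Epoch, PublicKey};

/// Hypothetical always-failing stub, useful for exercising error paths
/// without a gRPC round-trip. Not part of this commit.
struct FailingDutiesNode;

impl BeaconNodeDuties for FailingDutiesNode {
    fn request_duties(
        &self,
        _epoch: Epoch,
        _pubkeys: &[PublicKey],
    ) -> Result<EpochDuties, BeaconNodeDutiesError> {
        // Simulate a dropped connection to the beacon node.
        Err(BeaconNodeDutiesError::RemoteFailure("connection refused".into()))
    }
}
```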
@@ -1,13 +1,13 @@
+mod beacon_node_duties;
 mod epoch_duties;
 mod grpc;
 // TODO: reintroduce tests
 //#[cfg(test)]
 //mod test_node;
-mod traits;

+pub use self::beacon_node_duties::{BeaconNodeDuties, BeaconNodeDutiesError};
 use self::epoch_duties::{EpochDuties, EpochDutiesMapError};
 pub use self::epoch_duties::{EpochDutiesMap, WorkInfo};
-use self::traits::{BeaconNode, BeaconNodeError};
 use futures::Async;
 use slog::{debug, error, info};
 use std::sync::Arc;

@@ -29,7 +29,7 @@ pub enum UpdateOutcome {
 pub enum Error {
     DutiesMapPoisoned,
     EpochMapPoisoned,
-    BeaconNodeError(BeaconNodeError),
+    BeaconNodeDutiesError(BeaconNodeDutiesError),
     UnknownEpoch,
     UnknownValidator,
 }

@@ -38,14 +38,14 @@ pub enum Error {
 /// Node.
 ///
 /// This keeps track of all validator keys and required voting slots.
-pub struct DutiesManager<U: BeaconNode> {
+pub struct DutiesManager<U: BeaconNodeDuties> {
     pub duties_map: RwLock<EpochDutiesMap>,
     /// A list of all public keys known to the validator service.
     pub pubkeys: Vec<PublicKey>,
     pub beacon_node: Arc<U>,
 }

-impl<U: BeaconNode> DutiesManager<U> {
+impl<U: BeaconNodeDuties> DutiesManager<U> {
     /// Check the Beacon Node for `EpochDuties`.
     ///
     /// be a wall-clock (e.g., system time, remote server time, etc.).

@@ -112,9 +112,9 @@ impl<U: BeaconNode> DutiesManager<U> {
 }

 //TODO: Use error_chain to handle errors
-impl From<BeaconNodeError> for Error {
-    fn from(e: BeaconNodeError) -> Error {
-        Error::BeaconNodeError(e)
+impl From<BeaconNodeDutiesError> for Error {
+    fn from(e: BeaconNodeDutiesError) -> Error {
+        Error::BeaconNodeDutiesError(e)
     }
 }

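The reworked `From` impl keeps `?` ergonomic inside the manager: a `BeaconNodeDutiesError` returned by the node converts into this module's `Error` automatically. An illustrative sketch (the `fetch_duties` helper is not in this commit):

```rust
// Illustrative only: `?` routes the node's BeaconNodeDutiesError through
// the From impl above into this module's Error type.
fn fetch_duties<U: BeaconNodeDuties>(
    node: &U,
    epoch: Epoch,
    pubkeys: &[PublicKey],
) -> Result<EpochDuties, Error> {
    let duties = node.request_duties(epoch, pubkeys)?;
    Ok(duties)
}
```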
@@ -1,5 +1,5 @@
 mod attester_service;
-mod block_producer_service;
+mod block_producer;
 mod config;
 mod duties;
 pub mod error;
@@ -9,15 +9,14 @@
 /// data from the beacon node and performs the signing before publishing the block to the beacon
 /// node.
 use crate::attester_service::{AttestationGrpcClient, AttesterService};
+use crate::block_producer::BlockProducer;
 use crate::block_producer_service::BeaconBlockGrpcClient;
 use crate::config::Config as ValidatorConfig;
-use crate::duties::UpdateOutcome;
-use crate::duties::{DutiesManager, EpochDutiesMap};
+use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap, UpdateOutcome};
 use crate::error as error_chain;
 use crate::error::ErrorKind;
 use attester::test_utils::EpochMap;
 use attester::{test_utils::LocalSigner as AttesterLocalSigner, Attester};
-use block_proposer::{test_utils::LocalSigner as BlockProposerLocalSigner, BlockProducer};
 use bls::Keypair;
 use grpcio::{ChannelBuilder, EnvBuilder};
 use protos::services::Empty;
@@ -35,13 +34,13 @@ use tokio::runtime::Builder;
 use tokio::timer::Interval;
 use tokio_timer::clock::Clock;
 use types::test_utils::generate_deterministic_keypairs;
-use types::{Epoch, Fork, Slot};
+use types::{ChainSpec, Epoch, Fork, Slot};

 //TODO: This service should be simplified in the future. Can be made more streamlined.

 /// The validator service. This is the main thread that executes and maintains validator
 /// duties.
-pub struct Service {
+pub struct Service<B: BeaconNodeDuties + 'static> {
     /// The node we are currently connected to.
     connected_node_version: String,
     /// The chain id we are processing on.
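The new `B: BeaconNodeDuties + 'static` bound is there because the duties manager is shared with spawned threads (see `check_for_duties` below), and `std::thread::spawn` requires its closure to capture only `'static` data. A minimal sketch of the constraint, using stub types rather than the real ones:

```rust
use std::sync::Arc;

// Stub stand-ins to illustrate the bound; not the real lighthouse types.
struct Manager<B>(Arc<B>);

fn spawn_update<B: Send + Sync + 'static>(manager: Arc<Manager<B>>) {
    // thread::spawn demands a Send + 'static closure: every captured value,
    // including B behind the Arc, must not borrow non-'static data.
    std::thread::spawn(move || {
        let _manager = manager; // the Arc moves in; fine because B: 'static
    });
}

fn main() {
    let manager = Arc::new(Manager(Arc::new(42u64)));
    spawn_update(manager);
}
```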
@@ -50,28 +49,25 @@ pub struct Service {
     slot_clock: SystemTimeSlotClock,
     /// The current slot we are processing.
     current_slot: Slot,
-    /// The number of slots per epoch to allow for converting slots to epochs.
-    slots_per_epoch: u64,
+    /// The chain specification for this client's instance.
+    spec: Arc<ChainSpec>,
+    /// The duties manager which maintains the state of when to perform actions.
+    duties_manager: Arc<DutiesManager<B>>,
     // GRPC Clients
     /// The beacon block GRPC client.
-    beacon_block_client: Arc<BeaconBlockServiceClient>,
-    /// The validator GRPC client.
-    validator_client: Arc<ValidatorServiceClient>,
+    beacon_block_client: Arc<BeaconBlockGrpcClient>,
     /// The attester GRPC client.
     attester_client: Arc<AttestationServiceClient>,
     /// The validator client logger.
     log: slog::Logger,
 }

-impl Service {
+impl<B: BeaconNodeDuties + 'static> Service<B> {
     /// Initial connection to the beacon node to determine its properties.
     ///
     /// This tries to connect to a beacon node. Once connected, it initialises the gRPC clients
     /// and returns an instance of the service.
-    fn initialize_service(
-        config: &ValidatorConfig,
-        log: slog::Logger,
-    ) -> error_chain::Result<Self> {
+    fn initialize_service(config: ValidatorConfig, log: slog::Logger) -> error_chain::Result<Self> {
         // initialise the beacon node client to check for a connection

         let env = Arc::new(EnvBuilder::new().build());
@@ -139,7 +135,9 @@ impl Service {
         // Beacon node gRPC beacon block endpoints.
         let beacon_block_client = {
             let ch = ChannelBuilder::new(env.clone()).connect(&config.server);
-            Arc::new(BeaconBlockServiceClient::new(ch))
+            let beacon_block_service_client = Arc::new(BeaconBlockServiceClient::new(ch));
+            // a wrapper around the service client to implement the beacon block node trait
+            Arc::new(BeaconBlockGrpcClient::new(beacon_block_service_client))
         };

         // Beacon node gRPC validator endpoints.
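The replacement is a newtype wrapper: the raw `BeaconBlockServiceClient` is wrapped in a `BeaconBlockGrpcClient` that implements the beacon-block node trait, so the service stores the trait-implementing handle rather than the bare gRPC client. A sketch of the shape (the field name is an assumption, not taken from this commit):

```rust
// Sketch only: the `client` field name is assumed, not from this commit.
pub struct BeaconBlockGrpcClient {
    client: Arc<BeaconBlockServiceClient>,
}

impl BeaconBlockGrpcClient {
    pub fn new(client: Arc<BeaconBlockServiceClient>) -> Self {
        Self { client }
    }
}
```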
@@ -164,14 +162,37 @@ impl Service {
             .map_err(|e| ErrorKind::SlotClockError(e))?
             .expect("Genesis must be in the future");

+        let spec = Arc::new(config.spec);
+
+        /* Generate the duties manager */
+
+        // generate keypairs
+
+        // TODO: keypairs are randomly generated; they should be loaded from a file or generated.
+        // https://github.com/sigp/lighthouse/issues/160
+        let keypairs = Arc::new(generate_deterministic_keypairs(8));
+
+        // Builds a mapping of Epoch -> Map(PublicKey, EpochDuty)
+        // where EpochDuty contains slot numbers and attestation data that each validator needs to
+        // produce work on.
+        let duties_map = RwLock::new(EpochDutiesMap::new(spec.slots_per_epoch));
+
+        // builds a manager which maintains the list of current duties for all known validators
+        // and can check when a validator needs to perform a task.
+        let duties_manager = Arc::new(DutiesManager {
+            duties_map,
+            pubkeys: keypairs.iter().map(|keypair| keypair.pk.clone()).collect(),
+            beacon_node: validator_client,
+        });
+
         Ok(Self {
             connected_node_version: node_info.version,
             fork,
             slot_clock,
             current_slot,
-            slots_per_epoch: config.spec.slots_per_epoch,
+            spec,
+            duties_manager,
             beacon_block_client,
-            validator_client,
             attester_client,
             log,
         })
@@ -180,7 +201,7 @@ impl Service {
     /// Initialise the service then run the core thread.
     pub fn start(config: ValidatorConfig, log: slog::Logger) -> error_chain::Result<()> {
         // connect to the node and retrieve its properties and initialize the gRPC clients
-        let service = Service::initialize_service(&config, log)?;
+        let service = Service::initialize_service(config, log)?;

         // we have connected to a node and established its parameters. Spin up the core service

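`initialize_service` now consumes the `ValidatorConfig` rather than borrowing it, so `start` forwards ownership once. A hedged usage sketch; the turbofish and the `Default` impl are assumptions about the caller, and the discard logger is purely illustrative:

```rust
// Usage sketch, not from this commit. Assumes ValidatorConfig: Default;
// the turbofish pins the duties backend to the gRPC validator client.
let log = slog::Logger::root(slog::Discard, slog::o!());
let config = ValidatorConfig::default();
Service::<ValidatorServiceClient>::start(config, log)?;
```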
@@ -205,137 +226,95 @@ impl Service {
             Interval::new(Instant::now() + duration_to_next_slot, slot_duration)
         };

-        /* kick off core service */
-
-        // generate keypairs
-
-        // TODO: keypairs are randomly generated; they should be loaded from a file or generated.
-        // https://github.com/sigp/lighthouse/issues/160
-        let keypairs = Arc::new(generate_deterministic_keypairs(8));
-
-        /* build requisite objects to pass to core thread */
-
-        // Builds a mapping of Epoch -> Map(PublicKey, EpochDuty)
-        // where EpochDuty contains slot numbers and attestation data that each validator needs to
-        // produce work on.
-        let duties_map = RwLock::new(EpochDutiesMap::new(config.spec.slots_per_epoch));
-
-        // builds a manager which maintains the list of current duties for all known validators
-        // and can check when a validator needs to perform a task.
-        let manager = Arc::new(DutiesManager {
-            duties_map,
-            pubkeys: keypairs.iter().map(|keypair| keypair.pk.clone()).collect(),
-            beacon_node: service.validator_client.clone(),
-        });
-
-        // run the core thread
+        /* kick off the core service */
         runtime.block_on(
             interval
                 .for_each(move |_| {
-                    let log = service.log.clone();
-
-                    /* get the current slot and epoch */
-                    let current_slot = match service.slot_clock.present_slot() {
-                        Err(e) => {
-                            error!(log, "SystemTimeError {:?}", e);
-                            return Ok(());
-                        }
-                        Ok(slot) => slot.expect("Genesis is in the future"),
-                    };
-
-                    let current_epoch = current_slot.epoch(service.slots_per_epoch);
-
-                    debug_assert!(
-                        current_slot > service.current_slot,
-                        "The Timer should poll a new slot"
-                    );
-
-                    info!(log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64());
-
-                    /* check for new duties */
-
-                    let cloned_manager = manager.clone();
-                    let cloned_log = log.clone();
-                    // spawn a new thread separate to the runtime
-                    std::thread::spawn(move || {
-                        let _empty_error =
-                            cloned_manager.run_update(current_epoch.clone(), cloned_log.clone());
-                    });
-
-                    /* execute any specified duties */
-
-                    if let Some(work) = manager.get_current_work(current_slot) {
-                        for (_public_key, work_type) in work {
-                            if work_type.produce_block {
-                                // TODO: Produce a beacon block in a new thread
-                            }
-                            if work_type.attestation_duty.is_some() {
-                                // available AttestationDuty info
-                                let attestation_duty =
-                                    work_type.attestation_duty.expect("Cannot be None");
-                                //TODO: Produce an attestation in a new thread
-                            }
-                        }
-                    }
-
+                    // if a non-fatal error occurs, proceed to the next slot.
+                    let _ignore_error = service.per_slot_execution();
+                    // completed a slot process
                     Ok(())
                 })
                 .map_err(|e| format!("Service thread failed: {:?}", e)),
         );

-        // completed a slot process
+        // validator client exited
         Ok(())
     }

-    /*
-    // Spawn a new thread to perform block production for the validator.
-    let producer_thread = {
-        let spec = spec.clone();
-        let signer = Arc::new(BlockProposerLocalSigner::new(keypair.clone()));
-        let duties_map = duties_map.clone();
-        let slot_clock = slot_clock.clone();
-        let log = log.clone();
-        let client = Arc::new(BeaconBlockGrpcClient::new(beacon_block_grpc_client.clone()));
-        thread::spawn(move || {
-            let block_producer =
-                BlockProducer::new(spec, duties_map, slot_clock, client, signer);
-            let mut block_producer_service = BlockProducerService {
-                block_producer,
-                poll_interval_millis,
-                log,
-            };
-
-            block_producer_service.run();
-        })
-    };
-
-    // Spawn a new thread for attestation for the validator.
-    let attester_thread = {
-        let signer = Arc::new(AttesterLocalSigner::new(keypair.clone()));
-        let epoch_map = epoch_map_for_attester.clone();
-        let slot_clock = slot_clock.clone();
-        let log = log.clone();
-        let client = Arc::new(AttestationGrpcClient::new(attester_grpc_client.clone()));
-        thread::spawn(move || {
-            let attester = Attester::new(epoch_map, slot_clock, client, signer);
-            let mut attester_service = AttesterService {
-                attester,
-                poll_interval_millis,
-                log,
-            };
-
-            attester_service.run();
-        })
-    };
-
-    threads.push((duties_manager_thread, producer_thread, attester_thread));
-
-    // Naively wait for all the threads to complete.
-    for tuple in threads {
-        let (manager, producer, attester) = tuple;
-        let _ = producer.join();
-        let _ = manager.join();
-        let _ = attester.join();
-    }
-    */
+    /// The execution logic that runs every slot.
+    // Errors are logged to output, and core execution continues unless fatal errors occur.
+    fn per_slot_execution(&mut self) -> error_chain::Result<()> {
+        /* get the new current slot and epoch */
+        self.update_current_slot()?;
+
+        /* check for new duties */
+        self.check_for_duties();
+
+        /* process any required duties for validators */
+        self.process_duties();
+
+        Ok(())
+    }
+
+    /// Updates the known current slot and epoch.
+    fn update_current_slot(&mut self) -> error_chain::Result<()> {
+        let current_slot = match self.slot_clock.present_slot() {
+            Err(e) => {
+                error!(self.log, "SystemTimeError {:?}", e);
+                return Err("Could not read system time".into());
+            }
+            Ok(slot) => slot.expect("Genesis is in the future"),
+        };
+
+        let current_epoch = current_slot.epoch(self.spec.slots_per_epoch);
+
+        // this is a fatal error. If the slot clock repeats, there is something wrong with
+        // the timer, terminate immediately.
+        assert!(
+            current_slot > self.current_slot,
+            "The Timer should poll a new slot"
+        );
+        self.current_slot = current_slot;
+        info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64());
+        Ok(())
+    }
+
+    /// For all known validator keypairs, update any known duties from the beacon node.
+    fn check_for_duties(&mut self) {
+        let cloned_manager = self.duties_manager.clone();
+        let cloned_log = self.log.clone();
+        let current_epoch = self.current_slot.epoch(self.spec.slots_per_epoch);
+        // spawn a new thread separate to the runtime
+        // TODO: Handle thread termination/timeout
+        std::thread::spawn(move || {
+            // the return value is a future which returns ready.
+            // built to be compatible with the tokio runtime.
+            let _empty = cloned_manager.run_update(current_epoch.clone(), cloned_log.clone());
+        });
+    }
+
+    /// If there are any duties to process, spawn a separate thread and perform required actions.
+    fn process_duties(&mut self) {
+        if let Some(work) = self.duties_manager.get_current_work(self.current_slot) {
+            for (_public_key, work_type) in work {
+                if work_type.produce_block {
+                    // spawns a thread to produce a beacon block
+                    std::thread::spawn(move || {
+                        let block_producer = BlockProducer {
+                            fork: self.fork,
+                            slot: self.current_slot,
+                            spec: self.spec.clone(),
+                        };
+                    });
+
+                    // TODO: Produce a beacon block in a new thread
+                }
+                if work_type.attestation_duty.is_some() {
+                    // available AttestationDuty info
+                    let attestation_duty = work_type.attestation_duty.expect("Cannot be None");
+                    //TODO: Produce an attestation in a new thread
+                }
+            }
+        }
+    }
 }
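With the duties source abstracted behind `B`, the gRPC client becomes just one instantiation of the service. A hedged sketch of how the type parameter could be pinned (the mock alias is hypothetical):

```rust
// The instantiation implied by initialize_service, which wires the gRPC
// validator client into the duties manager:
type GrpcValidatorService = Service<ValidatorServiceClient>;

// A hypothetical test instantiation, assuming some MockDutiesNode that
// implements BeaconNodeDuties (not part of this commit):
// type MockValidatorService = Service<MockDutiesNode>;
```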