Add new validator API for voluntary exit (#4119)

## Issue Addressed

Addresses #4117 

## Proposed Changes

See https://github.com/ethereum/keymanager-APIs/pull/58 for the proposed API specification.
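
In brief, the endpoint is `POST /eth/v1/validator/{pubkey}/voluntary_exit` on the VC's keymanager API, taking an optional `epoch` query parameter and returning a `SignedVoluntaryExit` without broadcasting it. A rough, non-normative sketch of calling it over HTTP (the port, API token and pubkey below are placeholders, not part of this PR):

```rust
// Illustrative only: request a signed voluntary exit from the VC keymanager API.
// The URL, API token and pubkey are placeholders.
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let pubkey = "0x<voting-pubkey>"; // hypothetical validator pubkey
    let url = format!("http://localhost:5062/eth/v1/validator/{pubkey}/voluntary_exit");

    let signed_exit: Value = reqwest::Client::new()
        .post(url)
        .query(&[("epoch", "200")]) // omit `epoch` to sign for the current epoch
        .bearer_auth("<api-token>")
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // Expected shape: { "message": { "epoch", "validator_index" }, "signature" }
    println!("{signed_exit}");
    Ok(())
}
```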

## TODO

- [x] ~~Add submission to BN~~ 
  - removed, see discussion in [keymanager API](https://github.com/ethereum/keymanager-APIs/pull/58)
- [x] ~~Add flag to allow voluntary exit via the API~~ 
  - no longer needed now that the VC doesn't submit the exit directly
- [x] ~~Additional verification / checks, e.g. whether the validator is on the same network as the BN~~ 
  - to be done on the client side (see the submission sketch after this list)
- [x] ~~Potentially wait for the message to propagate and return some exit information in the response~~ 
  - not required
- [x] Update http tests
- [x] ~~Update lighthouse book~~ 
  - not required if this endpoint makes it to the standard keymanager API
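
Broadcasting is left to the caller: the `SignedVoluntaryExit` returned above can be submitted to a beacon node via the standard `POST /eth/v1/beacon/pool/voluntary_exits` endpoint. A minimal sketch of that client-side step (the BN URL is a placeholder; this step is intentionally outside the scope of this PR):

```rust
// Illustrative only: submit a previously obtained SignedVoluntaryExit to a BN.
use serde_json::Value;

async fn broadcast_exit(signed_exit: &Value) -> Result<(), reqwest::Error> {
    reqwest::Client::new()
        .post("http://localhost:5052/eth/v1/beacon/pool/voluntary_exits")
        .json(signed_exit)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```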

Co-authored-by: Paul Hauner <paul@paulhauner.com>
Co-authored-by: Jimmy Chen <jimmy@sigmaprime.io>
Commit e2c68c8893 (parent 2de3451011) by Jimmy Chen, 2023-04-03 03:02:56 +00:00
10 changed files with 256 additions and 9 deletions

@@ -642,6 +642,30 @@ impl ValidatorClientHttpClient {
let url = self.make_gas_limit_url(pubkey)?;
self.delete_with_raw_response(url, &()).await
}
/// `POST /eth/v1/validator/{pubkey}/voluntary_exit`
pub async fn post_validator_voluntary_exit(
&self,
pubkey: &PublicKeyBytes,
epoch: Option<Epoch>,
) -> Result<SignedVoluntaryExit, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("eth")
.push("v1")
.push("validator")
.push(&pubkey.to_string())
.push("voluntary_exit");
if let Some(epoch) = epoch {
path.query_pairs_mut()
.append_pair("epoch", &epoch.to_string());
}
self.post(path, &()).await
}
}
/// Returns `Ok(response)` if the response is a `200 OK` response or a

@@ -144,3 +144,8 @@ pub struct UpdateGasLimitRequest {
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub gas_limit: u64,
}
#[derive(Deserialize)]
pub struct VoluntaryExitQuery {
pub epoch: Option<Epoch>,
}

@@ -0,0 +1,69 @@
use crate::validator_store::ValidatorStore;
use bls::{PublicKey, PublicKeyBytes};
use slog::{info, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit};
pub async fn create_signed_voluntary_exit<T: 'static + SlotClock + Clone, E: EthSpec>(
pubkey: PublicKey,
maybe_epoch: Option<Epoch>,
validator_store: Arc<ValidatorStore<T, E>>,
slot_clock: T,
log: Logger,
) -> Result<SignedVoluntaryExit, warp::Rejection> {
let epoch = match maybe_epoch {
Some(epoch) => epoch,
None => get_current_epoch::<T, E>(slot_clock).ok_or_else(|| {
warp_utils::reject::custom_server_error("Unable to determine current epoch".to_string())
})?,
};
let pubkey_bytes = PublicKeyBytes::from(pubkey);
if !validator_store.has_validator(&pubkey_bytes) {
return Err(warp_utils::reject::custom_not_found(format!(
"{} is disabled or not managed by this validator client",
pubkey_bytes.as_hex_string()
)));
}
let validator_index = validator_store
.validator_index(&pubkey_bytes)
.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"The validator index for {} is not known. The validator client \
may still be initializing or the validator has not yet had a \
deposit processed.",
pubkey_bytes.as_hex_string()
))
})?;
let voluntary_exit = VoluntaryExit {
epoch,
validator_index,
};
info!(
log,
"Signing voluntary exit";
"validator" => pubkey_bytes.as_hex_string(),
"epoch" => epoch
);
let signed_voluntary_exit = validator_store
.sign_voluntary_exit(pubkey_bytes, voluntary_exit)
.await
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"Failed to sign voluntary exit: {:?}",
e
))
})?;
Ok(signed_voluntary_exit)
}
/// Calculates the current epoch from the genesis time and current time.
fn get_current_epoch<T: 'static + SlotClock + Clone, E: EthSpec>(slot_clock: T) -> Option<Epoch> {
slot_clock.now().map(|s| s.epoch(E::slots_per_epoch()))
}

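The `get_current_epoch` helper above is simply integer division of the current slot by the slots-per-epoch constant. A standalone sketch of the same arithmetic, without Lighthouse's `types` crate (the mainnet preset of 32 slots per epoch is assumed):

```rust
// Standalone illustration of the slot -> epoch arithmetic behind
// `Slot::epoch(slots_per_epoch)` as used in `get_current_epoch` above.
const SLOTS_PER_EPOCH: u64 = 32; // mainnet preset; other presets differ

fn epoch_of(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH // integer division
}

fn main() {
    assert_eq!(epoch_of(0), 0);
    assert_eq!(epoch_of(31), 0);
    assert_eq!(epoch_of(32), 1);
    println!("slot 6432 is in epoch {}", epoch_of(6432)); // 201
}
```
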
@@ -1,9 +1,11 @@
mod api_secret;
mod create_signed_voluntary_exit;
mod create_validator;
mod keystores;
mod remotekeys;
mod tests;
use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit;
use crate::{determine_graffiti, GraffitiFile, ValidatorStore};
use account_utils::{
mnemonic_from_phrase,
@@ -71,6 +73,7 @@ pub struct Context<T: SlotClock, E: EthSpec> {
pub spec: ChainSpec,
pub config: Config,
pub log: Logger,
pub slot_clock: T,
pub _phantom: PhantomData<E>,
}
@@ -189,6 +192,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
let inner_ctx = ctx.clone();
let log_filter = warp::any().map(move || inner_ctx.log.clone());
let inner_slot_clock = ctx.slot_clock.clone();
let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone());
let inner_spec = Arc::new(ctx.spec.clone());
let spec_filter = warp::any().map(move || inner_spec.clone());
@@ -904,6 +910,46 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
)
.map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT));
// POST /eth/v1/validator/{pubkey}/voluntary_exit
let post_validators_voluntary_exits = eth_v1
.and(warp::path("validator"))
.and(warp::path::param::<PublicKey>())
.and(warp::path("voluntary_exit"))
.and(warp::query::<api_types::VoluntaryExitQuery>())
.and(warp::path::end())
.and(validator_store_filter.clone())
.and(slot_clock_filter)
.and(log_filter.clone())
.and(signer.clone())
.and(task_executor_filter.clone())
.and_then(
|pubkey: PublicKey,
query: api_types::VoluntaryExitQuery,
validator_store: Arc<ValidatorStore<T, E>>,
slot_clock: T,
log,
signer,
task_executor: TaskExecutor| {
blocking_signed_json_task(signer, move || {
if let Some(handle) = task_executor.handle() {
let signed_voluntary_exit =
handle.block_on(create_signed_voluntary_exit(
pubkey,
query.epoch,
validator_store,
slot_clock,
log,
))?;
Ok(signed_voluntary_exit)
} else {
Err(warp_utils::reject::custom_server_error(
"Lighthouse shutting down".into(),
))
}
})
},
);
// GET /eth/v1/keystores
let get_std_keystores = std_keystores
.and(signer.clone())
@@ -1001,6 +1047,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
.or(post_validators_keystore)
.or(post_validators_mnemonic)
.or(post_validators_web3signer)
.or(post_validators_voluntary_exits)
.or(post_fee_recipient)
.or(post_gas_limit)
.or(post_std_keystores)

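One detail worth noting in the route above: the handler body runs inside `blocking_signed_json_task`, so the async `create_signed_voluntary_exit` future is driven to completion from that blocking context via the task executor's handle. A generic sketch of that bridge using plain tokio (Lighthouse's `TaskExecutor` wraps a tokio handle; the wrapper itself is not shown here):

```rust
// Generic illustration: drive an async future from inside a blocking task via
// the runtime handle, mirroring `task_executor.handle()` + `handle.block_on(..)`.
use tokio::runtime::Handle;

async fn sign_something() -> u64 {
    42 // stand-in for `create_signed_voluntary_exit(...)`
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let handle = Handle::current();
    let signed = tokio::task::spawn_blocking(move || {
        // On a blocking thread it is safe to block on async work.
        handle.block_on(sign_something())
    })
    .await
    .expect("blocking task panicked");
    assert_eq!(signed, 42);
}
```
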
@@ -45,6 +45,7 @@ struct ApiTester {
initialized_validators: Arc<RwLock<InitializedValidators>>,
validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
url: SensitiveUrl,
slot_clock: TestingSlotClock,
_server_shutdown: oneshot::Sender<()>,
_validator_dir: TempDir,
_runtime_shutdown: exit_future::Signal,
@@ -90,8 +91,12 @@ impl ApiTester {
let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME);
let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap();
let slot_clock =
TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1));
let genesis_time: u64 = 0;
let slot_clock = TestingSlotClock::new(
Slot::new(0),
Duration::from_secs(genesis_time),
Duration::from_secs(1),
);
let (runtime_shutdown, exit) = exit_future::signal();
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
@@ -101,9 +106,9 @@
initialized_validators,
slashing_protection,
Hash256::repeat_byte(42),
spec,
spec.clone(),
Some(Arc::new(DoppelgangerService::new(log.clone()))),
slot_clock,
slot_clock.clone(),
&config,
executor.clone(),
log.clone(),
@@ -129,7 +134,8 @@ impl ApiTester {
listen_port: 0,
allow_origin: None,
},
log,
log: log.clone(),
slot_clock: slot_clock.clone(),
_phantom: PhantomData,
});
let ctx = context.clone();
@@ -156,6 +162,7 @@ impl ApiTester {
initialized_validators,
validator_store,
url,
slot_clock,
_server_shutdown: shutdown_tx,
_validator_dir: validator_dir,
_runtime_shutdown: runtime_shutdown,
@@ -494,6 +501,33 @@ impl ApiTester {
self
}
pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option<Epoch>) -> Self {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];
// manually setting validator index in `ValidatorStore`
self.initialized_validators
.write()
.set_index(&validator.voting_pubkey, 0);
let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch());
let resp = self
.client
.post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch)
.await;
assert!(resp.is_ok());
assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch);
self
}
fn get_current_epoch(&self) -> Epoch {
self.slot_clock
.now()
.map(|s| s.epoch(E::slots_per_epoch()))
.unwrap()
}
pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self {
let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];
@@ -778,6 +812,29 @@ fn hd_validator_creation() {
});
}
#[test]
fn validator_exit() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.test_sign_voluntary_exits(0, None)
.await
.test_sign_voluntary_exits(0, Some(Epoch::new(256)))
.await;
});
}
#[test]
fn validator_enabling() {
let runtime = build_runtime();

@@ -88,6 +88,11 @@ lazy_static::lazy_static! {
"Total count of attempted SyncSelectionProof signings",
&["status"]
);
pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"vc_signed_voluntary_exits_total",
"Total count of VoluntaryExit signings",
&["status"]
);
pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"builder_validator_registrations_total",
"Total count of ValidatorRegistrationData signings",

@@ -94,6 +94,7 @@ pub struct ProductionValidatorClient<T: EthSpec> {
doppelganger_service: Option<Arc<DoppelgangerService>>,
preparation_service: PreparationService<SystemTimeSlotClock, T>,
validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>>,
slot_clock: SystemTimeSlotClock,
http_api_listen_addr: Option<SocketAddr>,
config: Config,
}
@@ -461,7 +462,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
let sync_committee_service = SyncCommitteeService::new(
duties_service.clone(),
validator_store.clone(),
slot_clock,
slot_clock.clone(),
beacon_nodes.clone(),
context.service_context("sync_committee".into()),
);
@@ -482,6 +483,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
preparation_service,
validator_store,
config,
slot_clock,
http_api_listen_addr: None,
})
}
@@ -544,6 +546,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
graffiti_flag: self.config.graffiti,
spec: self.context.eth2_config.spec.clone(),
config: self.config.http_api.clone(),
slot_clock: self.slot_clock.clone(),
log: log.clone(),
_phantom: PhantomData,
});

@@ -47,6 +47,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullP
},
SignedContributionAndProof(&'a ContributionAndProof<T>),
ValidatorRegistration(&'a ValidatorRegistrationData),
VoluntaryExit(&'a VoluntaryExit),
}
impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Payload> {
@@ -67,6 +68,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay
} => beacon_block_root.signing_root(domain),
SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain),
SignableMessage::ValidatorRegistration(v) => v.signing_root(domain),
SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain),
}
}
}
@@ -203,6 +205,7 @@ impl SigningMethod {
SignableMessage::ValidatorRegistration(v) => {
Web3SignerObject::ValidatorRegistration(v)
}
SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e),
};
// Determine the Web3Signer message type.

@@ -62,7 +62,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload<T>> {
RandaoReveal {
epoch: Epoch,
},
#[allow(dead_code)]
VoluntaryExit(&'a VoluntaryExit),
SyncCommitteeMessage {
beacon_block_root: Hash256,

@@ -22,8 +22,9 @@ use types::{
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof,
Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof,
Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot,
SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution,
SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData,
SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId,
ValidatorRegistrationData, VoluntaryExit,
};
use validator_dir::ValidatorDir;
@@ -155,6 +156,14 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
self.validators.clone()
}
/// Indicates if the `voting_public_key` exists in self and is enabled.
pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool {
self.validators
.read()
.validator(voting_public_key)
.is_some()
}
/// Insert a new validator to `self`, where the validator is represented by an EIP-2335
/// keystore on the filesystem.
#[allow(clippy::too_many_arguments)]
@@ -616,6 +625,32 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
}
}
pub async fn sign_voluntary_exit(
&self,
validator_pubkey: PublicKeyBytes,
voluntary_exit: VoluntaryExit,
) -> Result<SignedVoluntaryExit, Error> {
let signing_epoch = voluntary_exit.epoch;
let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch);
let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?;
let signature = signing_method
.get_signature::<E, BlindedPayload<E>>(
SignableMessage::VoluntaryExit(&voluntary_exit),
signing_context,
&self.spec,
&self.task_executor,
)
.await?;
metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]);
Ok(SignedVoluntaryExit {
message: voluntary_exit,
signature,
})
}
pub async fn sign_validator_registration_data(
&self,
validator_registration_data: ValidatorRegistrationData,