From e961ff60b49c17f93ad42597ff119fa60aa8015c Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Sun, 30 Jan 2022 23:22:04 +0000
Subject: [PATCH] Implement standard keystore API (#2736)

## Issue Addressed

Implements the standard key manager API from https://ethereum.github.io/keymanager-APIs/,
formerly https://github.com/ethereum/beacon-APIs/pull/151
Related to https://github.com/sigp/lighthouse/issues/2557

## Proposed Changes

- [x] Add all of the new endpoints from the standard API: GET, POST and DELETE.
- [x] Add a `validators.enabled` column to the slashing protection database to support atomic disable + export.
- [x] Add tests for all the common sequential accesses of the API
- [x] Add tests for interactions with remote signer validators
- [x] Add end-to-end tests for migration of validators from one VC to another
- [x] Implement the authentication scheme from the standard (token bearer auth)

## Additional Info

The `enabled` column in the validators SQL database is necessary to prevent a race condition when
exporting slashing protection data. Without the slashing protection database having a way of
knowing that a key has been disabled, a concurrent request to sign a message could insert a new
record into the database. The `delete_concurrent_with_signing` test exercises this code path, and
was indeed failing before the `enabled` column was added.

The validator client authentication has been modified from basic auth to bearer auth, with basic
auth preserved for backwards compatibility.
---
 Cargo.lock                                    |  12 +
 book/src/api-vc-auth-header.md                |  21 +-
 book/src/api-vc-endpoints.md                  |  47 +-
 book/src/api-vc.md                            |   9 +-
 common/account_utils/src/lib.rs               |  16 +-
 .../src/validator_definitions.rs              |  14 +-
 common/eth2/Cargo.toml                        |   3 +-
 common/eth2/src/lib.rs                        |   9 +
 common/eth2/src/lighthouse_vc/http_client.rs  | 232 ++++-
 common/eth2/src/lighthouse_vc/mod.rs          |   1 +
 common/eth2/src/lighthouse_vc/std_types.rs    | 104 ++
 common/eth2/src/lighthouse_vc/types.rs        |   1 +
 common/validator_dir/src/builder.rs           |   9 +-
 consensus/serde_utils/Cargo.toml              |   4 +-
 consensus/serde_utils/src/json_str.rs         |  25 +
 consensus/serde_utils/src/lib.rs              |   1 +
 validator_client/Cargo.toml                   |   3 +-
 .../slashing_protection/Cargo.toml            |   5 +
 .../v0_no_enabled_column.sqlite               | Bin 0 -> 28672 bytes
 .../slashing_protection/src/lib.rs            |   1 +
 .../src/registration_tests.rs                 |  42 +
 .../src/slashing_database.rs                  | 135 ++-
 .../slashing_protection/tests/main.rs         |   2 +
 .../slashing_protection/tests/migration.rs    |  68 ++
 validator_client/src/http_api/api_secret.rs   |  25 +-
 validator_client/src/http_api/keystores.rs    | 290 ++++++
 validator_client/src/http_api/mod.rs          | 106 +-
 validator_client/src/http_api/tests.rs        |  50 +-
 .../src/http_api/tests/keystores.rs           | 977 ++++++++++++++++++
 .../src/initialized_validators.rs             | 148 ++-
 validator_client/src/signing_method.rs        |   3 +-
 validator_client/src/validator_store.rs       |  48 +-
 32 files changed, 2284 insertions(+), 127 deletions(-)
 create mode 100644 common/eth2/src/lighthouse_vc/std_types.rs
 create mode 100644 consensus/serde_utils/src/json_str.rs
 create mode 100644 validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite
 create mode 100644 validator_client/slashing_protection/tests/main.rs
 create mode 100644 validator_client/slashing_protection/tests/migration.rs
 create mode 100644 validator_client/src/http_api/keystores.rs
 create mode 100644 validator_client/src/http_api/tests/keystores.rs

diff --git a/Cargo.lock b/Cargo.lock
index 586cdaf18..826c01b09 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1556,6 +1556,7 @@ dependencies = [
 "sensitive_url",
 "serde",
 "serde_json",
+ "slashing_protection",
 "store",
 "types",
 ]
@@ -4692,6 +4693,7 @@ dependencies = [
 "rand_chacha 0.2.2",
 "rand_core 0.5.1",
 "rand_hc 0.2.0",
+ "rand_pcg",
 ]

 [[package]]
@@ -4762,6 +4764,15 @@ dependencies = [
 "rand_core 0.6.3",
 ]

+[[package]]
+name = "rand_pcg"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
+dependencies = [
+ "rand_core 0.5.1",
+]
+
 [[package]]
 name = "rand_xorshift"
 version = "0.2.0"
@@ -6677,6 +6688,7 @@ dependencies = [
 "lighthouse_metrics",
 "lighthouse_version",
 "lockfile",
+ "logging",
 "monitoring_api",
 "parking_lot",
 "rand 0.7.3",
diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md
index d09a9e54a..33f6f6ff7 100644
--- a/book/src/api-vc-auth-header.md
+++ b/book/src/api-vc-auth-header.md
@@ -6,13 +6,13 @@ The validator client HTTP server requires that all requests have the following
 HTTP header:

 - Name: `Authorization`
-- Value: `Basic <api-token>`
+- Value: `Bearer <api-token>`

 Where `<api-token>` is a string that can be obtained from the validator client host. Here is an
 example `Authorization` header:

 ```
-Authorization Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
+Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
 ```

 ## Obtaining the API token
@@ -35,12 +35,27 @@ to the file containing the api token.
 Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/prater/validators/api-token.txt", listen_address: 127.0.0.1:5062
 ```

+The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only
+one accessible without the token):
+
+```bash
+curl http://localhost:5062/lighthouse/auth
+```
+
+Response:
+
+```json
+{
+    "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt"
+}
+```
+
 ## Example

 Here is an example `curl` command using the API token in the `Authorization` header:

 ```bash
-curl localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123"
+curl localhost:5062/lighthouse/version -H "Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123"
 ```

 The server should respond with its version:
diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md
index 16fd8ff8a..14d18312e 100644
--- a/book/src/api-vc-endpoints.md
+++ b/book/src/api-vc-endpoints.md
@@ -4,15 +4,19 @@
 HTTP Path | Description |
 | --- | -- |
-[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version
-[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine
-[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator
-[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators
-[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator
-[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator
+[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version.
+[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine.
+[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator.
+[`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token.
+[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators.
+[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator.
+[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator.
 [`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic.
 [`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore.
 [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic.
+[`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators.
+
+In addition to the above endpoints, Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/).

 ## `GET /lighthouse/version`

@@ -153,6 +157,37 @@ Typical Responses | 200
 }
 ```

+## `GET /lighthouse/auth`
+
+Fetch the filesystem path of the [authorization token](./api-vc-auth-header.md).
+Unlike the other endpoints, this may be called _without_ providing an authorization token.
+
+This API is intended to be called from the same machine as the validator client, so that the token
+file may be read by a local user with access rights.
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/auth`
+Method | GET
+Required Headers | -
+Typical Responses | 200
+
+### Example Path
+
+```
+localhost:5062/lighthouse/auth
+```
+
+### Example Response Body
+
+```json
+{
+    "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt"
+}
+```
+
 ## `GET /lighthouse/validators`

 Lists all validators managed by this validator client.
diff --git a/book/src/api-vc.md b/book/src/api-vc.md
index 6ee79d4f7..74c493ebe 100644
--- a/book/src/api-vc.md
+++ b/book/src/api-vc.md
@@ -1,9 +1,12 @@
 # Validator Client API

-Lighthouse implements a HTTP/JSON API for the validator client. Since there is
-no Eth2 standard validator client API, Lighthouse has defined its own.
+Lighthouse implements a JSON HTTP API for the validator client which enables programmatic management
+of validators and keys.

-A full list of endpoints can be found in [Endpoints](./api-vc-endpoints.md).
+The API includes all of the endpoints from the [standard keymanager
+API](https://ethereum.github.io/keymanager-APIs/) that is implemented by other clients and remote
+signers. It also includes some Lighthouse-specific endpoints which are described in
+[Endpoints](./api-vc-endpoints.md).

 > Note: All requests to the HTTP server must supply an
 > [`Authorization`](./api-vc-auth-header.md) header. All responses contain a
diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs
index dc79a1f20..89de38038 100644
--- a/common/account_utils/src/lib.rs
+++ b/common/account_utils/src/lib.rs
@@ -85,15 +85,23 @@ pub fn write_file_via_temporary(
     Ok(())
 }

-/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN`.
+/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `PlainText`.
 pub fn random_password() -> PlainText {
+    random_password_raw_string().into_bytes().into()
+}
+
+/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `ZeroizeString`.
+pub fn random_password_string() -> ZeroizeString {
+    random_password_raw_string().into()
+}
+
+/// Common implementation for `random_password` and `random_password_string`.
+fn random_password_raw_string() -> String {
     rand::thread_rng()
         .sample_iter(&Alphanumeric)
         .take(DEFAULT_PASSWORD_LEN)
         .map(char::from)
-        .collect::<String>()
-        .into_bytes()
-        .into()
+        .collect()
 }

 /// Remove any number of newline or carriage returns from the end of a vector of bytes.
diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs
index 418c0fb3c..d66683bee 100644
--- a/common/account_utils/src/validator_definitions.rs
+++ b/common/account_utils/src/validator_definitions.rs
@@ -46,9 +46,6 @@ pub enum Error {
 }

 /// Defines how the validator client should attempt to sign messages for this validator.
-///
-/// Presently there is only a single variant, however we expect more variants to arise (e.g.,
-/// remote signing).
 #[derive(Clone, PartialEq, Serialize, Deserialize)]
 #[serde(tag = "type")]
 pub enum SigningDefinition {
@@ -78,6 +75,12 @@ pub enum SigningDefinition {
     },
 }

+impl SigningDefinition {
+    pub fn is_local_keystore(&self) -> bool {
+        matches!(self, SigningDefinition::LocalKeystore { .. })
+    }
+}
+
 /// A validator that may be initialized by this validator client.
 ///
 /// Presently there is only a single variant, however we expect more variants to arise (e.g.,
@@ -293,6 +296,11 @@ impl ValidatorDefinitions {
         Ok(())
     }

+    /// Retain only the definitions matching the given predicate.
+    pub fn retain(&mut self, f: impl FnMut(&ValidatorDefinition) -> bool) {
+        self.0.retain(f);
+    }
+
     /// Adds a new `ValidatorDefinition` to `self`.
     pub fn push(&mut self, def: ValidatorDefinition) {
         self.0.push(def)
diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml
index f1c9f5061..d039a0c91 100644
--- a/common/eth2/Cargo.toml
+++ b/common/eth2/Cargo.toml
@@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0"
 futures-util = "0.3.8"
 futures = "0.3.8"
 store = { path = "../../beacon_node/store", optional = true }
+slashing_protection = { path = "../../validator_client/slashing_protection", optional = true }

 [target.'cfg(target_os = "linux")'.dependencies]
 # TODO: update psutil once fix is merged: https://github.com/rust-psutil/rust-psutil/pull/93
@@ -35,4 +36,4 @@ procinfo = { version = "0.4.2", optional = true }

 [features]
 default = ["lighthouse"]
-lighthouse = ["proto_array", "psutil", "procinfo", "store"]
+lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
index 8dc808c26..608a2c9e2 100644
--- a/common/eth2/src/lib.rs
+++ b/common/eth2/src/lib.rs
@@ -28,6 +28,7 @@ use serde::{de::DeserializeOwned, Serialize};
 use std::convert::TryFrom;
 use std::fmt;
 use std::iter::Iterator;
+use std::path::PathBuf;
 use std::time::Duration;

 pub const V1: EndpointVersion = EndpointVersion(1);
@@ -59,6 +60,12 @@ pub enum Error {
     InvalidServerSentEvent(String),
     /// The server returned an invalid SSZ response.
     InvalidSsz(ssz::DecodeError),
+    /// An I/O error occurred while loading an API token from disk.
+    TokenReadError(PathBuf, std::io::Error),
+    /// The client has been configured without a server pubkey, but requires one for this request.
+    NoServerPubkey,
+    /// The client has been configured without an API token, but requires one for this request.
+    NoToken,
 }

 impl From<reqwest::Error> for Error {
@@ -82,6 +89,8 @@
             Error::InvalidJson(_) => None,
             Error::InvalidServerSentEvent(_) => None,
             Error::InvalidSsz(_) => None,
+            Error::TokenReadError(..) => None,
+            Error::NoServerPubkey | Error::NoToken => None,
         }
     }
 }
diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs
index cd640e615..e7c74668e 100644
--- a/common/eth2/src/lighthouse_vc/http_client.rs
+++ b/common/eth2/src/lighthouse_vc/http_client.rs
@@ -10,6 +10,9 @@ use reqwest::{
 use ring::digest::{digest, SHA256};
 use sensitive_url::SensitiveUrl;
 use serde::{de::DeserializeOwned, Serialize};
+use std::fmt::{self, Display};
+use std::fs;
+use std::path::Path;

 pub use reqwest;
 pub use reqwest::{Response, StatusCode, Url};
@@ -20,18 +23,36 @@ pub use reqwest::{Response, StatusCode, Url};
 pub struct ValidatorClientHttpClient {
     client: reqwest::Client,
     server: SensitiveUrl,
-    secret: ZeroizeString,
-    server_pubkey: PublicKey,
-    send_authorization_header: bool,
+    secret: Option<ZeroizeString>,
+    server_pubkey: Option<PublicKey>,
+    authorization_header: AuthorizationHeader,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum AuthorizationHeader {
+    /// Do not send any Authorization header.
+    Omit,
+    /// Send a `Basic` Authorization header (legacy).
+    Basic,
+    /// Send a `Bearer` Authorization header.
+    Bearer,
+}
+
+impl Display for AuthorizationHeader {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // The `Omit` variant should never be `Display`ed, but would result in a harmless rejection.
+        write!(f, "{:?}", self)
+    }
 }

 /// Parse an API token and return a secp256k1 public key.
-pub fn parse_pubkey(secret: &str) -> Result<PublicKey, Error> {
+///
+/// If the token does not start with the Lighthouse token prefix then `Ok(None)` will be returned.
+/// An error will be returned if the token looks like a Lighthouse token but doesn't correspond to a
+/// valid public key.
+pub fn parse_pubkey(secret: &str) -> Result<Option<PublicKey>, Error> {
     let secret = if !secret.starts_with(SECRET_PREFIX) {
-        return Err(Error::InvalidSecret(format!(
-            "secret does not start with {}",
-            SECRET_PREFIX
-        )));
+        return Ok(None);
     } else {
         &secret[SECRET_PREFIX.len()..]
     };
@@ -52,16 +73,31 @@
         PublicKey::parse_compressed(&arr)
             .map_err(|e| Error::InvalidSecret(format!("invalid secp256k1 pubkey: {:?}", e)))
     })
+    .map(Some)
 }

 impl ValidatorClientHttpClient {
+    /// Create a new client pre-initialised with an API token.
     pub fn new(server: SensitiveUrl, secret: String) -> Result<Self, Error> {
         Ok(Self {
             client: reqwest::Client::new(),
             server,
             server_pubkey: parse_pubkey(&secret)?,
-            secret: secret.into(),
-            send_authorization_header: true,
+            secret: Some(secret.into()),
+            authorization_header: AuthorizationHeader::Bearer,
+        })
+    }
+
+    /// Create a client without an API token.
+    ///
+    /// A token can be fetched by using `self.get_auth`, and then reading the token from disk.
+    pub fn new_unauthenticated(server: SensitiveUrl) -> Result<Self, Error> {
+        Ok(Self {
+            client: reqwest::Client::new(),
+            server,
+            secret: None,
+            server_pubkey: None,
+            authorization_header: AuthorizationHeader::Omit,
         })
     }

@@ -74,8 +110,35 @@
             client,
             server,
             server_pubkey: parse_pubkey(&secret)?,
-            secret: secret.into(),
-            send_authorization_header: true,
+            secret: Some(secret.into()),
+            authorization_header: AuthorizationHeader::Bearer,
         })
     }

+    /// Get a reference to this client's API token, if any.
+    pub fn api_token(&self) -> Option<&ZeroizeString> {
+        self.secret.as_ref()
+    }
+
+    /// Read an API token from the specified `path`, stripping any trailing whitespace.
+    pub fn load_api_token_from_file(path: &Path) -> Result<ZeroizeString, Error> {
+        let token = fs::read_to_string(path).map_err(|e| Error::TokenReadError(path.into(), e))?;
+        Ok(ZeroizeString::from(token.trim_end().to_string()))
+    }
+
+    /// Add an authentication token to use when making requests.
+    ///
+    /// If the token is Lighthouse-like, a pubkey derivation will be attempted. In the case
+    /// of failure the token will still be stored, and the client can continue to be used to
+    /// communicate with non-Lighthouse nodes.
+    pub fn add_auth_token(&mut self, token: ZeroizeString) -> Result<(), Error> {
+        let pubkey_res = parse_pubkey(token.as_str());
+
+        self.secret = Some(token);
+        self.authorization_header = AuthorizationHeader::Bearer;
+
+        pubkey_res.map(|opt_pubkey| {
+            self.server_pubkey = opt_pubkey;
+        })
+    }

@@ -84,10 +147,20 @@
     /// Failing to send the `Authorization` header will cause the VC to reject requests with a 403.
     /// This function is intended only for testing purposes.
     pub fn send_authorization_header(&mut self, should_send: bool) {
-        self.send_authorization_header = should_send;
+        if should_send {
+            self.authorization_header = AuthorizationHeader::Bearer;
+        } else {
+            self.authorization_header = AuthorizationHeader::Omit;
+        }
+    }
+
+    /// Use the legacy basic auth style (bearer auth preferred by default now).
+    pub fn use_basic_auth(&mut self) {
+        self.authorization_header = AuthorizationHeader::Basic;
     }

     async fn signed_body(&self, response: Response) -> Result<bytes::Bytes, Error> {
+        let server_pubkey = self.server_pubkey.as_ref().ok_or(Error::NoServerPubkey)?;
         let sig = response
             .headers()
             .get("Signature")
@@ -105,7 +178,7 @@
             .ok()
             .and_then(|bytes| {
                 let sig = Signature::parse_der(&bytes).ok()?;
-                Some(libsecp256k1::verify(&message, &sig, &self.server_pubkey))
+                Some(libsecp256k1::verify(&message, &sig, server_pubkey))
             })
             .filter(|is_valid| *is_valid)
             .ok_or(Error::InvalidSignatureHeader)?;
@@ -121,11 +194,18 @@
     fn headers(&self) -> Result<HeaderMap, Error> {
         let mut headers = HeaderMap::new();

-        if self.send_authorization_header {
-            let header_value = HeaderValue::from_str(&format!("Basic {}", self.secret.as_str()))
-                .map_err(|e| {
-                    Error::InvalidSecret(format!("secret is invalid as a header value: {}", e))
-                })?;
+        if self.authorization_header == AuthorizationHeader::Basic
+            || self.authorization_header == AuthorizationHeader::Bearer
+        {
+            let secret = self.secret.as_ref().ok_or(Error::NoToken)?;
+            let header_value = HeaderValue::from_str(&format!(
+                "{} {}",
+                self.authorization_header,
+                secret.as_str()
+            ))
+            .map_err(|e| {
+                Error::InvalidSecret(format!("secret is invalid as a header value: {}", e))
+            })?;

             headers.insert("Authorization", header_value);
         }
@@ -133,8 +213,8 @@
         Ok(headers)
     }

-    /// Perform a HTTP GET request.
-    async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> {
+    /// Perform a HTTP GET request, returning the `Response` for further processing.
+    async fn get_response<U: IntoUrl>(&self, url: U) -> Result<Response, Error> {
         let response = self
             .client
             .get(url)
             .headers(self.headers()?)
             .send()
             .await
             .map_err(Error::Reqwest)?;
-        let response = ok_or_error(response).await?;
+        ok_or_error(response).await
+    }
+
+    async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> {
+        let response = self.get_response(url).await?;
         self.signed_json(response).await
     }

+    async fn get_unsigned<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> {
+        self.get_response(url)
+            .await?
+            .json()
+            .await
+            .map_err(Error::Reqwest)
+    }
+
     /// Perform a HTTP GET request, returning `None` on a 404 error.
     async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> {
-        let response = self
-            .client
-            .get(url)
-            .headers(self.headers()?)
-            .send()
-            .await
-            .map_err(Error::Reqwest)?;
-        match ok_or_error(response).await {
+        match self.get_response(url).await {
             Ok(resp) => self.signed_json(resp).await.map(Option::Some),
             Err(err) => {
                 if err.status() == Some(StatusCode::NOT_FOUND) {
@@ -168,11 +253,11 @@
     }

     /// Perform a HTTP POST request.
-    async fn post<T: Serialize, U: IntoUrl, V: DeserializeOwned>(
+    async fn post_with_raw_response<T: Serialize, U: IntoUrl>(
         &self,
         url: U,
         body: &T,
-    ) -> Result<V, Error> {
+    ) -> Result<Response, Error> {
         let response = self
             .client
             .post(url)
             .headers(self.headers()?)
             .json(body)
             .send()
             .await
             .map_err(Error::Reqwest)?;
-        let response = ok_or_error(response).await?;
+        ok_or_error(response).await
+    }
+
+    async fn post<T: Serialize, U: IntoUrl, V: DeserializeOwned>(
+        &self,
+        url: U,
+        body: &T,
+    ) -> Result<V, Error> {
+        let response = self.post_with_raw_response(url, body).await?;
         self.signed_json(response).await
     }

+    async fn post_with_unsigned_response<T: Serialize, U: IntoUrl, V: DeserializeOwned>(
+        &self,
+        url: U,
+        body: &T,
+    ) -> Result<V, Error> {
+        let response = self.post_with_raw_response(url, body).await?;
+        Ok(response.json().await?)
+    }
+
     /// Perform a HTTP PATCH request.
     async fn patch<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> {
         let response = self
@@ -200,6 +302,24 @@
         Ok(())
     }

+    /// Perform a HTTP DELETE request.
+    async fn delete_with_unsigned_response<T: Serialize, U: IntoUrl, V: DeserializeOwned>(
+        &self,
+        url: U,
+        body: &T,
+    ) -> Result<V, Error> {
+        let response = self
+            .client
+            .delete(url)
+            .headers(self.headers()?)
+            .json(body)
+            .send()
+            .await
+            .map_err(Error::Reqwest)?;
+        let response = ok_or_error(response).await?;
+        Ok(response.json().await?)
+    }
+
     /// `GET lighthouse/version`
     pub async fn get_lighthouse_version(&self) -> Result<GenericResponse<VersionData>, Error> {
         let mut path = self.server.full.clone();
@@ -317,7 +437,7 @@
     pub async fn post_lighthouse_validators_web3signer(
         &self,
         request: &[Web3SignerValidatorRequest],
-    ) -> Result<GenericResponse<Vec<ValidatorData>>, Error> {
+    ) -> Result<(), Error> {
         let mut path = self.server.full.clone();

         path.path_segments_mut()
@@ -345,6 +465,50 @@
         self.patch(path, &ValidatorPatchRequest { enabled }).await
     }
+
+    fn make_keystores_url(&self) -> Result<Url, Error> {
+        let mut url = self.server.full.clone();
+        url.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("eth")
+            .push("v1")
+            .push("keystores");
+        Ok(url)
+    }
+
+    /// `GET lighthouse/auth`
+    pub async fn get_auth(&self) -> Result<AuthResponse, Error> {
+        let mut url = self.server.full.clone();
+        url.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("lighthouse") + .push("auth"); + self.get_unsigned(url).await + } + + /// `GET eth/v1/keystores` + pub async fn get_keystores(&self) -> Result { + let url = self.make_keystores_url()?; + self.get_unsigned(url).await + } + + /// `POST eth/v1/keystores` + pub async fn post_keystores( + &self, + req: &ImportKeystoresRequest, + ) -> Result { + let url = self.make_keystores_url()?; + self.post_with_unsigned_response(url, req).await + } + + /// `DELETE eth/v1/keystores` + pub async fn delete_keystores( + &self, + req: &DeleteKeystoresRequest, + ) -> Result { + let url = self.make_keystores_url()?; + self.delete_with_unsigned_response(url, req).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an diff --git a/common/eth2/src/lighthouse_vc/mod.rs b/common/eth2/src/lighthouse_vc/mod.rs index b7de7c715..81b4fca28 100644 --- a/common/eth2/src/lighthouse_vc/mod.rs +++ b/common/eth2/src/lighthouse_vc/mod.rs @@ -1,4 +1,5 @@ pub mod http_client; +pub mod std_types; pub mod types; /// The number of bytes in the secp256k1 public key used as the authorization token for the VC API. diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs new file mode 100644 index 000000000..ebcce3fab --- /dev/null +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -0,0 +1,104 @@ +use account_utils::ZeroizeString; +use eth2_keystore::Keystore; +use serde::{Deserialize, Serialize}; +use slashing_protection::interchange::Interchange; +use types::PublicKeyBytes; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct AuthResponse { + pub token_path: String, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct ListKeystoresResponse { + pub data: Vec, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct SingleKeystoreResponse { + pub validating_pubkey: PublicKeyBytes, + pub derivation_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub readonly: Option, +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ImportKeystoresRequest { + pub keystores: Vec, + pub passwords: Vec, + pub slashing_protection: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); + +impl std::ops::Deref for KeystoreJsonStr { + type Target = Keystore; + fn deref(&self) -> &Keystore { + &self.0 + } +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); + +#[derive(Debug, Deserialize, Serialize)] +pub struct ImportKeystoresResponse { + pub data: Vec>, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct Status { + pub status: T, + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, +} + +impl Status { + pub fn ok(status: T) -> Self { + Self { + status, + message: None, + } + } + + pub fn error(status: T, message: String) -> Self { + Self { + status, + message: Some(message), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum ImportKeystoreStatus { + Imported, + Duplicate, + Error, +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DeleteKeystoresRequest { + pub pubkeys: Vec, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct DeleteKeystoresResponse { + pub data: 
+    #[serde(with = "eth2_serde_utils::json_str")]
+    pub slashing_protection: Interchange,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum DeleteKeystoreStatus {
+    Deleted,
+    NotActive,
+    NotFound,
+    Error,
+}
diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs
index 9e311c9d6..25b305053 100644
--- a/common/eth2/src/lighthouse_vc/types.rs
+++ b/common/eth2/src/lighthouse_vc/types.rs
@@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize};
 use std::path::PathBuf;

 pub use crate::lighthouse::Health;
+pub use crate::lighthouse_vc::std_types::*;
 pub use crate::types::{GenericResponse, VersionData};
 pub use types::*;

diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs
index 4d6de0516..861a6afe9 100644
--- a/common/validator_dir/src/builder.rs
+++ b/common/validator_dir/src/builder.rs
@@ -134,15 +134,18 @@ impl<'a> Builder<'a> {
         self
     }

+    /// Return the path to the validator dir to be built, i.e. `base_dir/pubkey`.
+    pub fn get_dir_path(base_validators_dir: &Path, voting_keystore: &Keystore) -> PathBuf {
+        base_validators_dir.join(format!("0x{}", voting_keystore.pubkey()))
+    }
+
     /// Consumes `self`, returning a `ValidatorDir` if no error is encountered.
     pub fn build(self) -> Result<ValidatorDir, Error> {
         let (voting_keystore, voting_password) = self
             .voting_keystore
             .ok_or(Error::UninitializedVotingKeystore)?;

-        let dir = self
-            .base_validators_dir
-            .join(format!("0x{}", voting_keystore.pubkey()));
+        let dir = Self::get_dir_path(&self.base_validators_dir, &voting_keystore);

         if dir.exists() {
             return Err(Error::DirectoryAlreadyExists(dir));
diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml
index 965a63c60..54eb55b8f 100644
--- a/consensus/serde_utils/Cargo.toml
+++ b/consensus/serde_utils/Cargo.toml
@@ -9,8 +9,6 @@ license = "Apache-2.0"
 [dependencies]
 serde = { version = "1.0.116", features = ["derive"] }
 serde_derive = "1.0.116"
+serde_json = "1.0.58"
 hex = "0.4.2"
 ethereum-types = "0.12.1"
-
-[dev-dependencies]
-serde_json = "1.0.58"
diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs
new file mode 100644
index 000000000..b9a181391
--- /dev/null
+++ b/consensus/serde_utils/src/json_str.rs
@@ -0,0 +1,25 @@
+//! Serialize a datatype as a JSON-blob within a single string.
+use serde::{
+    de::{DeserializeOwned, Error as _},
+    ser::Error as _,
+    Deserialize, Deserializer, Serialize, Serializer,
+};
+
+/// Serialize as a JSON object within a string.
+pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+    T: Serialize,
+{
+    serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?)
+}
+
+/// Deserialize a JSON object embedded in a string.
+pub fn deserialize<'de, T, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: DeserializeOwned, +{ + let json_str = String::deserialize(deserializer)?; + serde_json::from_str(&json_str).map_err(D::Error::custom) +} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 87179997e..81e2bbe96 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -3,6 +3,7 @@ mod quoted_int; pub mod fixed_bytes_hex; pub mod hex; pub mod hex_vec; +pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u32_hex; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 4e8aa57a5..08f5cec07 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -10,6 +10,7 @@ path = "src/lib.rs" [dev-dependencies] tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } +logging = { path = "../common/logging" } [dependencies] tree_hash = "0.4.1" @@ -48,7 +49,7 @@ hyper = "0.14.4" eth2_serde_utils = "0.1.1" libsecp256k1 = "0.6.0" ring = "0.16.19" -rand = "0.7.3" +rand = { version = "0.7.3", features = ["small_rng"] } lighthouse_metrics = { path = "../common/lighthouse_metrics" } lazy_static = "1.4.0" itertools = "0.10.0" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 634e49fee..697bd602b 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -3,6 +3,11 @@ name = "slashing_protection" version = "0.1.0" authors = ["Michael Sproul ", "pscott "] edition = "2018" +autotests = false + +[[test]] +name = "slashing_protection_tests" +path = "tests/main.rs" [dependencies] tempfile = "3.1.0" diff --git a/validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite b/validator_client/slashing_protection/migration-tests/v0_no_enabled_column.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..5a95fe36e6e4e3058d08851e7148e195984e25c9 GIT binary patch literal 28672 zcmeI)&u$w<7y$77>%@(m;g;)00ymK=G_y0iJA;Ho8f~knZbIBtq9-%6GZUs#g3vVG}P zRKIt3uQDBTr&oP&7?rGVY$fYATdmXK1G%?t2G1Qn>}hv*yR+Bp4U)^Pjmu};`pQPT zT)VoSTq|!a9q;>c(k`!*?Q(s!ypcRP(O>H766zim$ll$ zE$VySU2X35KiYANtd6`p=#BO}6QDa}!oivQtKrPT#f$!D~+Z9X|Uxs&lcJTX0&X;53Zy7;Wo z9rdR}j_Sj;{Os^krQu$2xyZ2N*2oSlp4+}UMOB@!82uO#e_A2%8F{DD{FDC ztt=?bX`U&~WQy}#VIw&a+8E;QK_#=Mph6Q>5gQ(iM^!XWF>%+)l+j!e!_rFY)VfZ) zzl;}z6{%3la>Iq-lxLPGBHb9dDX_$bx$ZHM7g|}G8zu9i5|wg2!#FQ&aL|sCx#9xL zf?HPMtjH^_mFqn(3fBRGQ;}(sD_TijSnDL6jV8uj#-*?}!;}!rDq9F=pE+`q=BW{G zEW9GrSpT3MOe^L3&uo=Z;;J&I{M223&UyVmdj5~z^3X5n zd-Nqzt_lSNKmY_l00ck)1V8`;KmY_l00cnbi9j=se7_aPD^p4{8pcJ@l(3a%IE)L! 
zsd2$DE(oT@z3xZ#dTUzL==neT)kFWGztNxQH&=xM0w4eaAOHd&00JNY0w4eaAOHd& z@bU=ExbF%4IF3i#D2hhgFbqfAAP6G&{Xl*E{9kzJXY?KV0!buZo&iEnK>!3m00ck) z1V8`;KmY_l00cnb+y$C(?9a}&j{hM3#5SAJC~o+JduNtrIEo9-400#m2;#bHcUoA} zAKfmV7B_zW|IkDKpuf-`=pp)d+7)mf1V8`;KmY_l00ck)1V8`;KmY_l-~|(?MgFWG X#iLCajW$6TNA+1haR0tATaW(-ZhfP; literal 0 HcmV?d00001 diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 858acbfe9..1610b5237 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -30,6 +30,7 @@ pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; #[derive(PartialEq, Debug)] pub enum NotSafe { UnregisteredValidator(PublicKeyBytes), + DisabledValidator(PublicKeyBytes), InvalidBlock(InvalidBlock), InvalidAttestation(InvalidAttestation), PermissionsError, diff --git a/validator_client/slashing_protection/src/registration_tests.rs b/validator_client/slashing_protection/src/registration_tests.rs index 40a3d6ee7..472f41577 100644 --- a/validator_client/slashing_protection/src/registration_tests.rs +++ b/validator_client/slashing_protection/src/registration_tests.rs @@ -2,6 +2,7 @@ use crate::test_utils::*; use crate::*; +use std::iter; use tempfile::tempdir; #[test] @@ -30,3 +31,44 @@ fn double_register_validators() { assert_eq!(slashing_db.num_validator_rows().unwrap(), num_validators); assert_eq!(validator_ids, get_validator_ids()); } + +#[test] +fn reregister_validator() { + let dir = tempdir().unwrap(); + let slashing_db_file = dir.path().join("slashing_protection.sqlite"); + let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); + + let pk = pubkey(0); + + // Register validator. + slashing_db.register_validator(pk).unwrap(); + let id = slashing_db.get_validator_id(&pk).unwrap(); + + slashing_db + .with_transaction(|txn| { + // Disable. + slashing_db.update_validator_status(txn, id, false)?; + + // Fetching the validator as "registered" should now fail. + assert_eq!( + slashing_db.get_validator_id_in_txn(txn, &pk).unwrap_err(), + NotSafe::DisabledValidator(pk) + ); + + // Fetching its status should return false. + let (fetched_id, enabled) = + slashing_db.get_validator_id_with_status(txn, &pk)?.unwrap(); + assert_eq!(fetched_id, id); + assert!(!enabled); + + // Re-registering the validator should preserve its ID while changing its status to + // enabled. + slashing_db.register_validators_in_txn(iter::once(&pk), txn)?; + + let re_reg_id = slashing_db.get_validator_id_in_txn(txn, &pk)?; + assert_eq!(re_reg_id, id); + + Ok::<_, NotSafe>(()) + }) + .unwrap(); +} diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 2b187f46e..9f585c010 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -28,6 +28,9 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(100); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; +/// Column ID of the `validators.enabled` column. 
+pub const VALIDATORS_ENABLED_CID: i64 = 2;
+
 #[derive(Debug, Clone)]
 pub struct SlashingDatabase {
     conn_pool: Pool<SqliteConnectionManager>,
@@ -55,7 +58,7 @@
         restrict_file_permissions(path).map_err(|_| NotSafe::PermissionsError)?;
         let conn_pool = Self::open_conn_pool(path)?;
-        let conn = conn_pool.get()?;
+        let mut conn = conn_pool.get()?;

         conn.execute(
             "CREATE TABLE validators (
@@ -88,13 +91,55 @@
             params![],
         )?;

+        // The tables created above are for the v0 schema. We immediately update them
+        // to the latest schema without dropping the connection.
+        let txn = conn.transaction()?;
+        Self::apply_schema_migrations(&txn)?;
+        txn.commit()?;
+
         Ok(Self { conn_pool })
     }

     /// Open an existing `SlashingDatabase` from disk.
+    ///
+    /// This will automatically check for and apply the latest schema migrations.
     pub fn open(path: &Path) -> Result<Self, NotSafe> {
         let conn_pool = Self::open_conn_pool(path)?;
-        Ok(Self { conn_pool })
+        let db = Self { conn_pool };
+        db.with_transaction(Self::apply_schema_migrations)?;
+        Ok(db)
+    }
+
+    fn apply_schema_migrations(txn: &Transaction) -> Result<(), NotSafe> {
+        // Add the `enabled` column to the `validators` table if it does not already exist.
+        let enabled_col_exists = txn
+            .query_row(
+                "SELECT cid, name FROM pragma_table_info('validators') WHERE name = 'enabled'",
+                params![],
+                |row| Ok((row.get(0)?, row.get(1)?)),
+            )
+            .optional()?
+            .map(|(cid, name): (i64, String)| {
+                // Check that the enabled column is in the correct position with the right name.
+                // This is a defensive check that shouldn't do anything in practice unless the
+                // slashing DB has been manually edited.
+                if cid == VALIDATORS_ENABLED_CID && name == "enabled" {
+                    Ok(())
+                } else {
+                    Err(NotSafe::ConsistencyError)
+                }
+            })
+            .transpose()?
+            .is_some();
+
+        if !enabled_col_exists {
+            txn.execute(
+                "ALTER TABLE validators ADD COLUMN enabled BOOL NOT NULL DEFAULT TRUE",
+                params![],
+            )?;
+        }
+
+        Ok(())
     }

     /// Open a new connection pool with all of the necessary settings and tweaks.
@@ -166,15 +211,37 @@
         public_keys: impl Iterator<Item = &'a PublicKeyBytes>,
         txn: &Transaction,
     ) -> Result<(), NotSafe> {
-        let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?;
+        let mut stmt =
+            txn.prepare("INSERT INTO validators (public_key, enabled) VALUES (?1, TRUE)")?;

         for pubkey in public_keys {
-            if self.get_validator_id_opt(txn, pubkey)?.is_none() {
-                stmt.execute([pubkey.as_hex_string()])?;
+            match self.get_validator_id_with_status(txn, pubkey)? {
+                None => {
+                    stmt.execute([pubkey.as_hex_string()])?;
+                }
+                Some((validator_id, false)) => {
+                    self.update_validator_status(txn, validator_id, true)?;
+                }
+                Some((_, true)) => {
+                    // Validator already registered and enabled.
+                }
             }
         }
         Ok(())
     }

+    pub fn update_validator_status(
+        &self,
+        txn: &Transaction,
+        validator_id: i64,
+        status: bool,
+    ) -> Result<(), NotSafe> {
+        txn.execute(
+            "UPDATE validators SET enabled = ? WHERE id = ?",
+            params![status, validator_id],
+        )?;
+        Ok(())
+    }
+
     /// Check that all of the given validators are registered.
     pub fn check_validator_registrations<'a>(
         &self,
@@ -203,7 +270,7 @@
             .collect()
     }

-    /// Get the database-internal ID for a validator.
+    /// Get the database-internal ID for an enabled validator.
     ///
     /// This is NOT the same as a validator index, and depends on the ordering that validators
     /// are registered with the slashing protection database (and may vary between machines).
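The migration above hinges on SQLite's `pragma_table_info` table-valued function, which yields one row per column of the named table, so a missing row means the column has yet to be added. As a minimal standalone sketch of the same detect-then-`ALTER TABLE` pattern (using the `rusqlite` crate directly rather than the connection pool used by the real database; `demo_migration` is an illustrative name, not part of this patch):

```rust
use rusqlite::{params, Connection, OptionalExtension};

/// Illustration only: add the `enabled` column if it is missing, mirroring
/// `apply_schema_migrations` above.
fn demo_migration(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute(
        "CREATE TABLE IF NOT EXISTS validators (
            id INTEGER PRIMARY KEY,
            public_key VARCHAR NOT NULL UNIQUE
        )",
        params![],
    )?;

    // `pragma_table_info` exposes one row per column of the named table.
    let enabled_col_exists = conn
        .query_row(
            "SELECT cid FROM pragma_table_info('validators') WHERE name = 'enabled'",
            params![],
            |row| row.get::<_, i64>(0),
        )
        .optional()?
        .is_some();

    if !enabled_col_exists {
        // DEFAULT TRUE keeps every pre-existing validator enabled.
        conn.execute(
            "ALTER TABLE validators ADD COLUMN enabled BOOL NOT NULL DEFAULT TRUE",
            params![],
        )?;
    }
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    demo_migration(&conn)?; // adds the column
    demo_migration(&conn)?; // second run is a no-op
    Ok(())
}
```

Running the migration twice being a no-op is exactly the property `open` relies on, since it applies the migrations unconditionally on every startup.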
@@ -213,26 +280,43 @@
         self.get_validator_id_in_txn(&txn, public_key)
     }

-    fn get_validator_id_in_txn(
+    pub fn get_validator_id_in_txn(
         &self,
         txn: &Transaction,
         public_key: &PublicKeyBytes,
     ) -> Result<i64, NotSafe> {
-        self.get_validator_id_opt(txn, public_key)?
-            .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))
+        let (validator_id, enabled) = self
+            .get_validator_id_with_status(txn, public_key)?
+            .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?;
+        if enabled {
+            Ok(validator_id)
+        } else {
+            Err(NotSafe::DisabledValidator(*public_key))
+        }
     }

-    /// Optional version of `get_validator_id`.
-    fn get_validator_id_opt(
+    /// Get validator ID regardless of whether or not it is enabled.
+    pub fn get_validator_id_ignoring_status(
         &self,
         txn: &Transaction,
         public_key: &PublicKeyBytes,
-    ) -> Result<Option<i64>, NotSafe> {
+    ) -> Result<i64, NotSafe> {
+        let (validator_id, _) = self
+            .get_validator_id_with_status(txn, public_key)?
+            .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key))?;
+        Ok(validator_id)
+    }
+
+    pub fn get_validator_id_with_status(
+        &self,
+        txn: &Transaction,
+        public_key: &PublicKeyBytes,
+    ) -> Result<Option<(i64, bool)>, NotSafe> {
         Ok(txn
             .query_row(
-                "SELECT id FROM validators WHERE public_key = ?1",
+                "SELECT id, enabled FROM validators WHERE public_key = ?1",
                 params![&public_key.as_hex_string()],
-                |row| row.get(0),
+                |row| Ok((row.get(0)?, row.get(1)?)),
             )
             .optional()?)
     }
@@ -722,13 +806,21 @@
     ) -> Result<Interchange, InterchangeError> {
         let mut conn = self.conn_pool.get()?;
         let txn = &conn.transaction()?;
+        self.export_interchange_info_in_txn(genesis_validators_root, selected_pubkeys, txn)
+    }

+    pub fn export_interchange_info_in_txn(
+        &self,
+        genesis_validators_root: Hash256,
+        selected_pubkeys: Option<&[PublicKeyBytes]>,
+        txn: &Transaction,
+    ) -> Result<Interchange, InterchangeError> {
         // Determine the validator IDs and public keys to export data for.
         let to_export = if let Some(selected_pubkeys) = selected_pubkeys {
             selected_pubkeys
                 .iter()
                 .map(|pubkey| {
-                    let id = self.get_validator_id_in_txn(txn, pubkey)?;
+                    let id = self.get_validator_id_ignoring_status(txn, pubkey)?;
                     Ok((id, *pubkey))
                 })
                 .collect::<Result<_, InterchangeError>>()?
         } else {
@@ -1089,7 +1181,6 @@ impl From<NotSafe> for InterchangeError {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::test_utils::pubkey;
     use tempfile::tempdir;

     #[test]
@@ -1106,8 +1197,7 @@
         let file = dir.path().join("db.sqlite");
         let _db1 = SlashingDatabase::create(&file).unwrap();

-        let db2 = SlashingDatabase::open(&file).unwrap();
-        db2.register_validator(pubkey(0)).unwrap_err();
+        SlashingDatabase::open(&file).unwrap_err();
     }

     // Attempting to create the same database twice should error.
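The `Interchange` values produced by these export functions serialise to the EIP-3076 slashing protection interchange format (version 5, per `SUPPORTED_INTERCHANGE_FORMAT_VERSION` above). A sketch of the JSON shape, using dummy placeholder values rather than real keys or roots:

```json
{
  "metadata": {
    "interchange_format_version": "5",
    "genesis_validators_root": "0x0000000000000000000000000000000000000000000000000000000000000000"
  },
  "data": [
    {
      "pubkey": "0xb845089a1457f811bfc000588fbb4e713669be8ce060ea6be3c6ece09afc3794106c91ca73acda5e5457122d58723bed",
      "signed_blocks": [
        { "slot": "81952" }
      ],
      "signed_attestations": [
        { "source_epoch": "2290", "target_epoch": "3007" }
      ]
    }
  ]
}
```

This is also why `export_interchange_info_in_txn` uses `get_validator_id_ignoring_status`: a just-disabled validator must still appear in the interchange data handed back by the `DELETE /eth/v1/keystores` response.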
@@ -1152,9 +1242,12 @@
     fn test_transaction_failure() {
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let _db1 = SlashingDatabase::create(&file).unwrap();
+        let db = SlashingDatabase::create(&file).unwrap();

-        let db2 = SlashingDatabase::open(&file).unwrap();
-        db2.test_transaction().unwrap_err();
+        db.with_transaction(|_| {
+            db.test_transaction().unwrap_err();
+            Ok::<(), NotSafe>(())
+        })
+        .unwrap();
     }
 }
diff --git a/validator_client/slashing_protection/tests/main.rs b/validator_client/slashing_protection/tests/main.rs
new file mode 100644
index 000000000..5b66bd87e
--- /dev/null
+++ b/validator_client/slashing_protection/tests/main.rs
@@ -0,0 +1,2 @@
+mod interop;
+mod migration;
diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs
new file mode 100644
index 000000000..cd3561f21
--- /dev/null
+++ b/validator_client/slashing_protection/tests/migration.rs
@@ -0,0 +1,68 @@
+//! Tests for upgrading a previous version of the database to the latest schema.
+use slashing_protection::{NotSafe, SlashingDatabase};
+use std::collections::HashMap;
+use std::fs;
+use std::path::{Path, PathBuf};
+use tempfile::tempdir;
+use types::Hash256;
+
+fn test_data_dir() -> PathBuf {
+    Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests")
+}
+
+/// Copy `filename` from the test data dir to the temporary `dest` for testing.
+fn make_copy(filename: &str, dest: &Path) -> PathBuf {
+    let source_file = test_data_dir().join(filename);
+    let dest_file = dest.join(filename);
+    fs::copy(source_file, &dest_file).unwrap();
+    dest_file
+}
+
+#[test]
+fn add_enabled_column() {
+    let tmp = tempdir().unwrap();
+
+    let path = make_copy("v0_no_enabled_column.sqlite", tmp.path());
+    let num_expected_validators = 5;
+
+    // Database should open without errors, indicating successful application of migrations.
+    // The input file has no `enabled` column, which should get added when opening it here.
+    let db = SlashingDatabase::open(&path).unwrap();
+
+    // Check that exporting an interchange file lists all the validators.
+    let interchange = db.export_all_interchange_info(Hash256::zero()).unwrap();
+    assert_eq!(interchange.data.len(), num_expected_validators);
+
+    db.with_transaction(|txn| {
+        // Check that all the validators are enabled and unique.
+        let uniq_validator_ids = interchange
+            .data
+            .iter()
+            .map(|data| {
+                let (validator_id, enabled) = db
+                    .get_validator_id_with_status(txn, &data.pubkey)
+                    .unwrap()
+                    .unwrap();
+                assert!(enabled);
+                (validator_id, data.pubkey)
+            })
+            .collect::<HashMap<_, _>>();
+
+        assert_eq!(uniq_validator_ids.len(), num_expected_validators);
+
+        // Check that we can disable them all.
+        for (&validator_id, pubkey) in &uniq_validator_ids {
+            db.update_validator_status(txn, validator_id, false)
+                .unwrap();
+            let (loaded_id, enabled) = db
+                .get_validator_id_with_status(txn, pubkey)
+                .unwrap()
+                .unwrap();
+            assert_eq!(validator_id, loaded_id);
+            assert!(!enabled);
+        }
+
+        Ok::<_, NotSafe>(())
+    })
+    .unwrap();
+}
diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs
index 531180cba..484ac50bd 100644
--- a/validator_client/src/http_api/api_secret.rs
+++ b/validator_client/src/http_api/api_secret.rs
@@ -162,25 +162,32 @@
     }

     /// Returns the path for the API token file
-    pub fn api_token_path(&self) -> &PathBuf {
-        &self.pk_path
+    pub fn api_token_path(&self) -> PathBuf {
+        self.pk_path.clone()
     }

-    /// Returns the value of the `Authorization` header which is used for verifying incoming HTTP
-    /// requests.
-    fn auth_header_value(&self) -> String {
-        format!("Basic {}", self.api_token())
+    /// Returns the values of the `Authorization` header which indicate a valid incoming HTTP
+    /// request.
+    ///
+    /// For backwards-compatibility we accept the token in a basic authentication style, but this is
+    /// technically invalid according to RFC 7617 because the token is not a base64-encoded username
+    /// and password. As such, bearer authentication should be preferred.
+    fn auth_header_values(&self) -> Vec<String> {
+        vec![
+            format!("Basic {}", self.api_token()),
+            format!("Bearer {}", self.api_token()),
+        ]
     }

     /// Returns a `warp` header which filters out requests that have a missing or inaccurate
     /// `Authorization` header.
     pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> {
-        let expected = self.auth_header_value();
+        let expected = self.auth_header_values();
         warp::any()
             .map(move || expected.clone())
             .and(warp::filters::header::header("Authorization"))
-            .and_then(move |expected: String, header: String| async move {
-                if header == expected {
+            .and_then(move |expected: Vec<String>, header: String| async move {
+                if expected.contains(&header) {
                     Ok(())
                 } else {
                     Err(warp_utils::reject::invalid_auth(header))
diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs
new file mode 100644
index 000000000..ce4035581
--- /dev/null
+++ b/validator_client/src/http_api/keystores.rs
@@ -0,0 +1,290 @@
+//! Implementation of the standard keystore management API.
+use crate::{signing_method::SigningMethod, InitializedValidators, ValidatorStore};
+use account_utils::ZeroizeString;
+use eth2::lighthouse_vc::std_types::{
+    DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus,
+    ImportKeystoresRequest, ImportKeystoresResponse, InterchangeJsonStr, KeystoreJsonStr,
+    ListKeystoresResponse, SingleKeystoreResponse, Status,
+};
+use eth2_keystore::Keystore;
+use slog::{info, warn, Logger};
+use slot_clock::SlotClock;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::sync::Weak;
+use tokio::runtime::Runtime;
+use types::{EthSpec, PublicKeyBytes};
+use validator_dir::Builder as ValidatorDirBuilder;
+use warp::Rejection;
+use warp_utils::reject::{custom_bad_request, custom_server_error};
+
+pub fn list<T: SlotClock + 'static, E: EthSpec>(
+    validator_store: Arc<ValidatorStore<T, E>>,
+) -> ListKeystoresResponse {
+    let initialized_validators_rwlock = validator_store.initialized_validators();
+    let initialized_validators = initialized_validators_rwlock.read();
+
+    let keystores = initialized_validators
+        .validator_definitions()
+        .iter()
+        .filter(|def| def.enabled)
+        .map(|def| {
+            let validating_pubkey = def.voting_public_key.compress();
+
+            let (derivation_path, readonly) = initialized_validators
+                .signing_method(&validating_pubkey)
+                .map_or((None, None), |signing_method| match *signing_method {
+                    SigningMethod::LocalKeystore {
+                        ref voting_keystore,
+                        ..
+                    } => (voting_keystore.path(), None),
+                    SigningMethod::Web3Signer { .. } => (None, Some(true)),
+                });
+
+            SingleKeystoreResponse {
+                validating_pubkey,
+                derivation_path,
+                readonly,
+            }
+        })
+        .collect::<Vec<_>>();
+
+    ListKeystoresResponse { data: keystores }
+}
+
+pub fn import<T: SlotClock + 'static, E: EthSpec>(
+    request: ImportKeystoresRequest,
+    validator_dir: PathBuf,
+    validator_store: Arc<ValidatorStore<T, E>>,
+    runtime: Weak<Runtime>,
+    log: Logger,
+) -> Result<ImportKeystoresResponse, Rejection> {
+    // Check request validity. This is the only case in which we should return a 4xx code.
+    if request.keystores.len() != request.passwords.len() {
+        return Err(custom_bad_request(format!(
+            "mismatched numbers of keystores ({}) and passwords ({})",
+            request.keystores.len(),
+            request.passwords.len(),
+        )));
+    }
+
+    info!(
+        log,
+        "Importing keystores via standard HTTP API";
+        "count" => request.keystores.len(),
+    );
+
+    // Import slashing protection data before keystores, so that new keystores don't start signing
+    // without it. Do not return early on failure, propagate the failure to each key.
+    let slashing_protection_status =
+        if let Some(InterchangeJsonStr(slashing_protection)) = request.slashing_protection {
+            // Warn for missing slashing protection.
+            for KeystoreJsonStr(ref keystore) in &request.keystores {
+                if let Some(public_key) = keystore.public_key() {
+                    let pubkey_bytes = public_key.compress();
+                    if !slashing_protection
+                        .data
+                        .iter()
+                        .any(|data| data.pubkey == pubkey_bytes)
+                    {
+                        warn!(
+                            log,
+                            "Slashing protection data not provided";
+                            "public_key" => ?public_key,
+                        );
+                    }
+                }
+            }
+
+            validator_store.import_slashing_protection(slashing_protection)
+        } else {
+            warn!(log, "No slashing protection data provided with keystores");
+            Ok(())
+        };
+
+    // Import each keystore. Some keystores may fail to be imported, so we record a status for each.
+    let mut statuses = Vec::with_capacity(request.keystores.len());
+
+    for (KeystoreJsonStr(keystore), password) in request
+        .keystores
+        .into_iter()
+        .zip(request.passwords.into_iter())
+    {
+        let pubkey_str = keystore.pubkey().to_string();
+
+        let status = if let Err(e) = &slashing_protection_status {
+            // Slashing protection import failed, do not attempt to import the key. Record an
+            // error status.
+            Status::error(
+                ImportKeystoreStatus::Error,
+                format!("slashing protection import failed: {:?}", e),
+            )
+        } else if let Some(runtime) = runtime.upgrade() {
+            // Import the keystore.
+            match import_single_keystore(
+                keystore,
+                password,
+                validator_dir.clone(),
+                &validator_store,
+                runtime,
+            ) {
+                Ok(status) => Status::ok(status),
+                Err(e) => {
+                    warn!(
+                        log,
+                        "Error importing keystore, skipped";
+                        "pubkey" => pubkey_str,
+                        "error" => ?e,
+                    );
+                    Status::error(ImportKeystoreStatus::Error, e)
+                }
+            }
+        } else {
+            Status::error(
+                ImportKeystoreStatus::Error,
+                "validator client shutdown".into(),
+            )
+        };
+        statuses.push(status);
+    }
+
+    Ok(ImportKeystoresResponse { data: statuses })
+}
+
+fn import_single_keystore<T: SlotClock + 'static, E: EthSpec>(
+    keystore: Keystore,
+    password: ZeroizeString,
+    validator_dir_path: PathBuf,
+    validator_store: &ValidatorStore<T, E>,
+    runtime: Arc<Runtime>,
+) -> Result<ImportKeystoreStatus, String> {
+    // Check if the validator key already exists, erroring if it is a remote signer validator.
+    let pubkey = keystore
+        .public_key()
+        .ok_or_else(|| format!("invalid pubkey: {}", keystore.pubkey()))?;
+    if let Some(def) = validator_store
+        .initialized_validators()
+        .read()
+        .validator_definitions()
+        .iter()
+        .find(|def| def.voting_public_key == pubkey)
+    {
+        if !def.signing_definition.is_local_keystore() {
+            return Err("cannot import duplicate of existing remote signer validator".into());
+        } else if def.enabled {
+            return Ok(ImportKeystoreStatus::Duplicate);
+        }
+    }
+
+    // Check that the password is correct.
+    // In future we should re-structure to avoid the double decryption here. It's not as simple
+    // as removing this check because `add_validator_keystore` will break if provided with an
+    // invalid validator definition (`update_validators` will get stuck trying to decrypt with the
+    // wrong password indefinitely).
+    keystore
+        .decrypt_keypair(password.as_ref())
+        .map_err(|e| format!("incorrect password: {:?}", e))?;
+
+    let validator_dir = ValidatorDirBuilder::new(validator_dir_path)
+        .voting_keystore(keystore, password.as_ref())
+        .store_withdrawal_keystore(false)
+        .build()
+        .map_err(|e| format!("failed to build validator directory: {:?}", e))?;
+
+    // Drop validator dir so that `add_validator_keystore` can re-lock the keystore.
+    let voting_keystore_path = validator_dir.voting_keystore_path();
+    drop(validator_dir);
+
+    runtime
+        .block_on(validator_store.add_validator_keystore(
+            voting_keystore_path,
+            password,
+            true,
+            None,
+        ))
+        .map_err(|e| format!("failed to initialize validator: {:?}", e))?;
+
+    Ok(ImportKeystoreStatus::Imported)
+}
+
+pub fn delete<T: SlotClock + 'static, E: EthSpec>(
+    request: DeleteKeystoresRequest,
+    validator_store: Arc<ValidatorStore<T, E>>,
+    runtime: Weak<Runtime>,
+    log: Logger,
+) -> Result<DeleteKeystoresResponse, Rejection> {
+    // Remove from initialized validators.
+    let initialized_validators_rwlock = validator_store.initialized_validators();
+    let mut initialized_validators = initialized_validators_rwlock.write();
+
+    let mut statuses = request
+        .pubkeys
+        .iter()
+        .map(|pubkey_bytes| {
+            match delete_single_keystore(pubkey_bytes, &mut initialized_validators, runtime.clone())
+            {
+                Ok(status) => Status::ok(status),
+                Err(error) => {
+                    warn!(
+                        log,
+                        "Error deleting keystore";
+                        "pubkey" => ?pubkey_bytes,
+                        "error" => ?error,
+                    );
+                    Status::error(DeleteKeystoreStatus::Error, error)
+                }
+            }
+        })
+        .collect::<Vec<_>>();
+
+    // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out
+    // of date as it resets when it can't be decrypted. We update it just a single time to avoid
+    // continually resetting it after each key deletion.
+    if let Some(runtime) = runtime.upgrade() {
+        runtime
+            .block_on(initialized_validators.update_validators())
+            .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?;
+    }
+
+    // Export the slashing protection data.
+    let slashing_protection = validator_store
+        .export_slashing_protection_for_keys(&request.pubkeys)
+        .map_err(|e| {
+            custom_server_error(format!("error exporting slashing protection: {:?}", e))
+        })?;
+
+    // Update statuses based on availability of slashing protection data.
+    for (pubkey, status) in request.pubkeys.iter().zip(statuses.iter_mut()) {
+        if status.status == DeleteKeystoreStatus::NotFound
+            && slashing_protection
+                .data
+                .iter()
+                .any(|interchange_data| interchange_data.pubkey == *pubkey)
+        {
+            status.status = DeleteKeystoreStatus::NotActive;
+        }
+    }
+
+    Ok(DeleteKeystoresResponse {
+        data: statuses,
+        slashing_protection,
+    })
+}
+
+fn delete_single_keystore(
+    pubkey_bytes: &PublicKeyBytes,
+    initialized_validators: &mut InitializedValidators,
+    runtime: Weak<Runtime>,
+) -> Result<DeleteKeystoreStatus, String> {
+    if let Some(runtime) = runtime.upgrade() {
+        let pubkey = pubkey_bytes
+            .decompress()
+            .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?;
+
+        runtime
+            .block_on(initialized_validators.delete_definition_and_keystore(&pubkey))
+            .map_err(|e| format!("unable to disable and delete: {:?}", e))
+    } else {
+        Err("validator client shutdown".into())
+    }
+}
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
index 5e0f3443a..8a5b24f87 100644
--- a/validator_client/src/http_api/mod.rs
+++ b/validator_client/src/http_api/mod.rs
@@ -1,14 +1,18 @@
 mod api_secret;
 mod create_validator;
+mod keystores;
 mod tests;

 use crate::ValidatorStore;
 use account_utils::mnemonic_from_phrase;
 use create_validator::{create_validators_mnemonic, create_validators_web3signer};
-use eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes};
+use eth2::lighthouse_vc::{
+    std_types::AuthResponse,
+    types::{self as api_types, PublicKey, PublicKeyBytes},
+};
 use lighthouse_version::version_with_platform;
 use serde::{Deserialize, Serialize};
-use slog::{crit, info, Logger};
+use slog::{crit, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::future::Future;
 use std::marker::PhantomData;
@@ -106,7 +110,7 @@
     // Configure CORS.
     let cors_builder = {
         let builder = warp::cors()
-            .allow_methods(vec!["GET", "POST", "PATCH"])
+            .allow_methods(vec!["GET", "POST", "PATCH", "DELETE"])
             .allow_headers(vec!["Content-Type", "Authorization"]);

         warp_utils::cors::set_builder_origins(
@@ -125,7 +129,20 @@
     }

     let authorization_header_filter = ctx.api_secret.authorization_header_filter();
-    let api_token_path = ctx.api_secret.api_token_path();
+    let mut api_token_path = ctx.api_secret.api_token_path();
+
+    // Attempt to convert the path to an absolute path, but don't error if it fails.
+    match api_token_path.canonicalize() {
+        Ok(abs_path) => api_token_path = abs_path,
+        Err(e) => {
+            warn!(
+                log,
+                "Error canonicalizing token path";
+                "error" => ?e,
+            );
+        }
+    };
+
     let signer = ctx.api_secret.signer();
     let signer = warp::any().map(move || signer.clone());

@@ -154,9 +171,15 @@
         })
     });

+    let inner_ctx = ctx.clone();
+    let log_filter = warp::any().map(move || inner_ctx.log.clone());
+
     let inner_spec = Arc::new(ctx.spec.clone());
     let spec_filter = warp::any().map(move || inner_spec.clone());

+    let api_token_path_inner = api_token_path.clone();
+    let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone());
+
     // GET lighthouse/version
     let get_node_version = warp::path("lighthouse")
         .and(warp::path("version"))
@@ -348,7 +371,7 @@
         .and(warp::path("keystore"))
         .and(warp::path::end())
         .and(warp::body::json())
-        .and(validator_dir_filter)
+        .and(validator_dir_filter.clone())
         .and(validator_store_filter.clone())
         .and(signer.clone())
         .and(runtime_filter.clone())
@@ -451,9 +474,9 @@
         .and(warp::path::param::<PublicKey>())
         .and(warp::path::end())
         .and(warp::body::json())
-        .and(validator_store_filter)
-        .and(signer)
-        .and(runtime_filter)
+        .and(validator_store_filter.clone())
+        .and(signer.clone())
+        .and(runtime_filter.clone())
         .and_then(
             |validator_pubkey: PublicKey,
              body: api_types::ValidatorPatchRequest,
@@ -495,6 +518,60 @@
             },
         );

+    // GET /lighthouse/auth
+    let get_auth = warp::path("lighthouse").and(warp::path("auth").and(warp::path::end()));
+    let get_auth = get_auth
+        .and(signer.clone())
+        .and(api_token_path_filter)
+        .and_then(|signer, token_path: PathBuf| {
+            blocking_signed_json_task(signer, move || {
+                Ok(AuthResponse {
+                    token_path: token_path.display().to_string(),
+                })
+            })
+        });
+
+    // Standard key-manager endpoints.
+    let eth_v1 = warp::path("eth").and(warp::path("v1"));
+    let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end());
+
+    // GET /eth/v1/keystores
+    let get_std_keystores = std_keystores
+        .and(signer.clone())
+        .and(validator_store_filter.clone())
+        .and_then(|signer, validator_store: Arc<ValidatorStore<T, E>>| {
+            blocking_signed_json_task(signer, move || Ok(keystores::list(validator_store)))
+        });
+
+    // POST /eth/v1/keystores
+    let post_std_keystores = std_keystores
+        .and(warp::body::json())
+        .and(signer.clone())
+        .and(validator_dir_filter)
+        .and(validator_store_filter.clone())
+        .and(runtime_filter.clone())
+        .and(log_filter.clone())
+        .and_then(
+            |request, signer, validator_dir, validator_store, runtime, log| {
+                blocking_signed_json_task(signer, move || {
+                    keystores::import(request, validator_dir, validator_store, runtime, log)
+                })
+            },
+        );
+
+    // DELETE /eth/v1/keystores
+    let delete_std_keystores = std_keystores
+        .and(warp::body::json())
+        .and(signer)
+        .and(validator_store_filter)
+        .and(runtime_filter)
+        .and(log_filter)
+        .and_then(|request, signer, validator_store, runtime, log| {
+            blocking_signed_json_task(signer, move || {
+                keystores::delete(request, validator_store, runtime, log)
+            })
+        });
+
     let routes = warp::any()
         .and(authorization_header_filter)
         // Note: it is critical that the `authorization_header_filter` is applied to all routes.
@@ -508,16 +585,21 @@ pub fn serve(
                     .or(get_lighthouse_health)
                     .or(get_lighthouse_spec)
                     .or(get_lighthouse_validators)
-                    .or(get_lighthouse_validators_pubkey),
+                    .or(get_lighthouse_validators_pubkey)
+                    .or(get_std_keystores),
             )
             .or(warp::post().and(
                 post_validators
                     .or(post_validators_keystore)
                     .or(post_validators_mnemonic)
-                    .or(post_validators_web3signer),
+                    .or(post_validators_web3signer)
+                    .or(post_std_keystores),
            ))
-            .or(warp::patch().and(patch_validators)),
+            .or(warp::patch().and(patch_validators))
+            .or(warp::delete().and(delete_std_keystores)),
         )
+        // The auth route is the only route that is allowed to be accessed without the API token.
+        .or(warp::get().and(get_auth))
         // Maps errors into HTTP responses.
         .recover(warp_utils::reject::handle_rejection)
         // Add a `Server` header.
@@ -550,7 +632,7 @@ pub async fn blocking_signed_json_task(
 ) -> Result<impl warp::Reply, warp::Rejection>
 where
     S: Fn(&[u8]) -> String,
-    F: Fn() -> Result<T, warp::Rejection> + Send + 'static,
+    F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
     T: Serialize + Send + 'static,
 {
     warp_utils::task::blocking_task(func)
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs
index c9ef869be..fda622901 100644
--- a/validator_client/src/http_api/tests.rs
+++ b/validator_client/src/http_api/tests.rs
@@ -1,6 +1,8 @@
 #![cfg(test)]
 #![cfg(not(debug_assertions))]
 
+mod keystores;
+
 use crate::doppelganger_service::DoppelgangerService;
 use crate::{
     http_api::{ApiSecret, Config as HttpConfig, Context},
@@ -9,16 +11,16 @@ use crate::{
 };
 use account_utils::{
     eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password,
-    ZeroizeString,
+    random_password_string, ZeroizeString,
 };
 use deposit_contract::decode_eth1_tx_data;
-use environment::null_logger;
 use eth2::{
     lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*},
     types::ErrorMessage as ApiErrorMessage,
     Error as ApiError,
 };
 use eth2_keystore::KeystoreBuilder;
+use logging::test_logger;
 use parking_lot::RwLock;
 use sensitive_url::SensitiveUrl;
 use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
@@ -40,6 +42,7 @@ type E = MainnetEthSpec;
 struct ApiTester {
     client: ValidatorClientHttpClient,
     initialized_validators: Arc<RwLock<InitializedValidators>>,
+    validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
     url: SensitiveUrl,
     _server_shutdown: oneshot::Sender<()>,
     _validator_dir: TempDir,
@@ -58,7 +61,7 @@ fn build_runtime() -> Arc<Runtime> {
 
 impl ApiTester {
     pub async fn new(runtime: std::sync::Weak<Runtime>) -> Self {
-        let log = null_logger().unwrap();
+        let log = test_logger();
 
         let validator_dir = tempdir().unwrap();
         let secrets_dir = tempdir().unwrap();
@@ -92,7 +95,7 @@ impl ApiTester {
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
         let executor = TaskExecutor::new(runtime.clone(), exit, log.clone(), shutdown_tx);
 
-        let validator_store = ValidatorStore::<_, E>::new(
+        let validator_store = Arc::new(ValidatorStore::<_, E>::new(
             initialized_validators,
             slashing_protection,
             Hash256::repeat_byte(42),
@@ -101,7 +104,7 @@ impl ApiTester {
             slot_clock,
             executor,
             log.clone(),
-        );
+        ));
 
         validator_store
             .register_all_in_doppelganger_protection_if_enabled()
@@ -113,7 +116,7 @@ impl ApiTester {
             runtime,
             api_secret,
             validator_dir: Some(validator_dir.path().into()),
-            validator_store: Some(Arc::new(validator_store)),
+            validator_store: Some(validator_store.clone()),
             spec: E::default_spec(),
             config: HttpConfig {
                 enabled: true,
@@ -144,11 +147,12 @@ impl ApiTester {
         let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap();
 
         Self {
-            initialized_validators,
-            _validator_dir: validator_dir,
             client,
+            initialized_validators,
+            validator_store,
             url,
             _server_shutdown: shutdown_tx,
+            _validator_dir: validator_dir,
             _runtime_shutdown: runtime_shutdown,
         }
     }
@@ -456,7 +460,7 @@ impl ApiTester {
         self.client
             .post_lighthouse_validators_web3signer(&request)
             .await
-            .unwrap_err();
+            .unwrap();
 
         assert_eq!(self.vals_total(), initial_vals + s.count);
         if s.enabled {
@@ -608,6 +612,34 @@ fn routes_with_invalid_auth() {
                 .await
         })
         .await
+        .test_with_invalid_auth(|client| async move { client.get_keystores().await })
+        .await
+        .test_with_invalid_auth(|client| async move {
+            let password = random_password_string();
+            let keypair = Keypair::random();
+            let keystore = KeystoreBuilder::new(&keypair, password.as_ref(), String::new())
+                .unwrap()
+                .build()
+                .map(KeystoreJsonStr)
+                .unwrap();
+            client
+                .post_keystores(&ImportKeystoresRequest {
+                    keystores: vec![keystore],
+                    passwords: vec![password],
+                    slashing_protection: None,
+                })
+                .await
+        })
+        .await
+        .test_with_invalid_auth(|client| async move {
+            let keypair = Keypair::random();
+            client
+                .delete_keystores(&DeleteKeystoresRequest {
+                    pubkeys: vec![keypair.pk.compress()],
+                })
+                .await
+        })
+        .await
     });
 }
diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs
new file mode 100644
index 000000000..1b35a0b57
--- /dev/null
+++ b/validator_client/src/http_api/tests/keystores.rs
@@ -0,0 +1,977 @@
+use super::*;
+use account_utils::random_password_string;
+use eth2::lighthouse_vc::{
+    http_client::ValidatorClientHttpClient as HttpClient,
+    std_types::{KeystoreJsonStr as Keystore, *},
+    types::Web3SignerValidatorRequest,
+};
+// use eth2_keystore::Keystore;
+use itertools::Itertools;
+use rand::{rngs::SmallRng, Rng, SeedableRng};
+use slashing_protection::interchange::{Interchange, InterchangeMetadata};
+use std::collections::HashMap;
+use std::path::Path;
+
+fn new_keystore(password: ZeroizeString) -> Keystore {
+    let keypair = Keypair::random();
+    Keystore(
+        KeystoreBuilder::new(&keypair, password.as_ref(), String::new())
+            .unwrap()
+            .build()
+            .unwrap(),
+    )
+}
+
+fn web3_signer_url() -> String {
+    "http://localhost:1/this-url-hopefully-doesnt-exist".into()
+}
+
+fn new_web3signer_validator() -> (Keypair, Web3SignerValidatorRequest) {
+    let keypair = Keypair::random();
+    let pk = keypair.pk.clone();
+    (keypair, web3signer_validator_with_pubkey(pk))
+}
+
+fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorRequest {
+    Web3SignerValidatorRequest {
+        enable: true,
+        description: "".into(),
+        graffiti: None,
+        voting_public_key: pubkey,
+        url: web3_signer_url(),
+        root_certificate_path: None,
+        request_timeout_ms: None,
+    }
+}
+
+fn run_test<F, V>(f: F)
+where
+    F: FnOnce(ApiTester) -> V,
+    V: Future<Output = ()>,
+{
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(async {
+        let tester = ApiTester::new(weak_runtime).await;
+        f(tester).await
+    });
+}
+
+fn run_dual_vc_test<F, V>(f: F)
+where
+    F: FnOnce(ApiTester, ApiTester) -> V,
+    V: Future<Output = ()>,
+{
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(async {
+        let tester1 = ApiTester::new(weak_runtime.clone()).await;
+        let tester2 = ApiTester::new(weak_runtime).await;
+        f(tester1, tester2).await
+    });
+}
+
+fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes {
+    keystore.0.public_key().unwrap().compress()
+}
+
+fn all_with_status<T: Clone>(count: usize, status: T) -> impl Iterator<Item = T> {
+    std::iter::repeat(status).take(count)
+}
+
+fn all_imported(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
+    all_with_status(count, ImportKeystoreStatus::Imported)
+}
+
+fn all_duplicate(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
+    all_with_status(count, ImportKeystoreStatus::Duplicate)
+}
+
+fn all_import_error(count: usize) -> impl Iterator<Item = ImportKeystoreStatus> {
+    all_with_status(count, ImportKeystoreStatus::Error)
+}
+
+fn all_deleted(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
+    all_with_status(count, DeleteKeystoreStatus::Deleted)
+}
+
+fn all_not_active(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
+    all_with_status(count, DeleteKeystoreStatus::NotActive)
+}
+
+fn all_not_found(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
+    all_with_status(count, DeleteKeystoreStatus::NotFound)
+}
+
+fn all_delete_error(count: usize) -> impl Iterator<Item = DeleteKeystoreStatus> {
+    all_with_status(count, DeleteKeystoreStatus::Error)
+}
+
+fn check_get_response<'a>(
+    response: &ListKeystoresResponse,
+    expected_keystores: impl IntoIterator<Item = &'a Keystore>,
+) {
+    for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) {
+        assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2));
+        assert_eq!(ks1.derivation_path, ks2.path());
+        assert!(ks1.readonly == None || ks1.readonly == Some(false));
+    }
+}
+
+fn check_import_response(
+    response: &ImportKeystoresResponse,
+    expected_statuses: impl IntoIterator<Item = ImportKeystoreStatus>,
+) {
+    for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) {
+        assert_eq!(
+            expected_status, status.status,
+            "message: {:?}",
+            status.message
+        );
+    }
+}
+
+fn check_delete_response<'a>(
+    response: &DeleteKeystoresResponse,
+    expected_statuses: impl IntoIterator<Item = DeleteKeystoreStatus>,
+) {
+    for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) {
+        assert_eq!(
+            status.status, expected_status,
+            "message: {:?}",
+            status.message
+        );
+    }
+}
+
+#[test]
+fn get_auth_no_token() {
+    run_test(|mut tester| async move {
+        tester.client.send_authorization_header(false);
+        let auth_response = tester.client.get_auth().await.unwrap();
+
+        // Load the file from the returned path.
+        let token_path = Path::new(&auth_response.token_path);
+        let token = HttpClient::load_api_token_from_file(token_path).unwrap();
+
+        // The token should match the one that the client was originally initialised with.
+        assert!(tester.client.api_token() == Some(&token));
+    })
+}
+
+#[test]
+fn get_empty_keystores() {
+    run_test(|tester| async move {
+        let res = tester.client.get_keystores().await.unwrap();
+        assert_eq!(res, ListKeystoresResponse { data: vec![] });
+    })
+}
+
+#[test]
+fn import_new_keystores() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..3)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone(); keystores.len()],
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+
+        // All keystores should be imported.
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // Check that GET lists all the imported keystores.
+        let get_res = tester.client.get_keystores().await.unwrap();
+        check_get_response(&get_res, &keystores);
+    })
+}
+
+#[test]
+fn import_only_duplicate_keystores() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..3)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        let req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+
+        // All keystores should be imported on first import.
+        let import_res = tester.client.post_keystores(&req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // No keystores should be imported on repeat import.
+        let import_res = tester.client.post_keystores(&req).await.unwrap();
+        check_import_response(&import_res, all_duplicate(keystores.len()));
+
+        // Check that GET lists all the imported keystores.
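+        // The duplicate statuses must not have removed or disabled the originals.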
+        let get_res = tester.client.get_keystores().await.unwrap();
+        check_get_response(&get_res, &keystores);
+    })
+}
+
+#[test]
+fn import_some_duplicate_keystores() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let num_keystores = 5;
+        let keystores_all = (0..num_keystores)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // Import even numbered keystores first.
+        let keystores1 = keystores_all
+            .iter()
+            .enumerate()
+            .filter_map(|(i, keystore)| {
+                if i % 2 == 0 {
+                    Some(keystore.clone())
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let req1 = ImportKeystoresRequest {
+            keystores: keystores1.clone(),
+            passwords: vec![password.clone(); keystores1.len()],
+            slashing_protection: None,
+        };
+
+        let req2 = ImportKeystoresRequest {
+            keystores: keystores_all.clone(),
+            passwords: vec![password.clone(); keystores_all.len()],
+            slashing_protection: None,
+        };
+
+        let import_res = tester.client.post_keystores(&req1).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores1.len()));
+
+        // Check partial import.
+        let expected = (0..num_keystores).map(|i| {
+            if i % 2 == 0 {
+                ImportKeystoreStatus::Duplicate
+            } else {
+                ImportKeystoreStatus::Imported
+            }
+        });
+        let import_res = tester.client.post_keystores(&req2).await.unwrap();
+        check_import_response(&import_res, expected);
+    })
+}
+
+#[test]
+fn import_wrong_number_of_passwords() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..3)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        let err = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone()],
+                slashing_protection: None,
+            })
+            .await
+            .unwrap_err();
+        assert_eq!(err.status().unwrap(), 400);
+    })
+}
+
+#[test]
+fn get_web3_signer_keystores() {
+    run_test(|tester| async move {
+        let num_local = 3;
+        let num_remote = 2;
+
+        // Add some local validators.
+        let password = random_password_string();
+        let keystores = (0..num_local)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone(); keystores.len()],
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+
+        // All keystores should be imported.
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // Add some web3signer validators.
+        let remote_vals = (0..num_remote)
+            .map(|_| new_web3signer_validator().1)
+            .collect::<Vec<_>>();
+
+        tester
+            .client
+            .post_lighthouse_validators_web3signer(&remote_vals)
+            .await
+            .unwrap();
+
+        // Check that both local and remote validators are returned.
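+        // Remote (web3signer) validators should appear as readonly entries with no
+        // derivation path, while local keystores are not marked readonly.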
+        let get_res = tester.client.get_keystores().await.unwrap();
+
+        let expected_responses = keystores
+            .iter()
+            .map(|local_keystore| SingleKeystoreResponse {
+                validating_pubkey: keystore_pubkey(local_keystore),
+                derivation_path: local_keystore.path(),
+                readonly: None,
+            })
+            .chain(remote_vals.iter().map(|remote_val| SingleKeystoreResponse {
+                validating_pubkey: remote_val.voting_public_key.compress(),
+                derivation_path: None,
+                readonly: Some(true),
+            }))
+            .collect::<Vec<_>>();
+
+        for response in expected_responses {
+            assert!(get_res.data.contains(&response), "{:?}", response);
+        }
+    })
+}
+
+#[test]
+fn import_and_delete_conflicting_web3_signer_keystores() {
+    run_test(|tester| async move {
+        let num_keystores = 3;
+
+        // Create some keystores to be used as both web3signer keystores and local keystores.
+        let password = random_password_string();
+        let keystores = (0..num_keystores)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+        let pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>();
+
+        // Add the validators as web3signer validators.
+        let remote_vals = pubkeys
+            .iter()
+            .map(|pubkey| web3signer_validator_with_pubkey(pubkey.decompress().unwrap()))
+            .collect::<Vec<_>>();
+
+        tester
+            .client
+            .post_lighthouse_validators_web3signer(&remote_vals)
+            .await
+            .unwrap();
+
+        // Attempt to import the same validators as local validators, which should error.
+        let import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_import_error(keystores.len()));
+
+        // Attempt to delete the web3signer validators, which should fail.
+        let delete_req = DeleteKeystoresRequest {
+            pubkeys: pubkeys.clone(),
+        };
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_delete_error(keystores.len()));
+
+        // Get should still list all the validators as `readonly`.
+        let get_res = tester.client.get_keystores().await.unwrap();
+        for (ks, pubkey) in get_res.data.iter().zip_eq(&pubkeys) {
+            assert_eq!(ks.validating_pubkey, *pubkey);
+            assert_eq!(ks.derivation_path, None);
+            assert_eq!(ks.readonly, Some(true));
+        }
+
+        // Disabling the web3signer validators should *still* prevent them from being
+        // overwritten.
+        for pubkey in &pubkeys {
+            tester
+                .client
+                .patch_lighthouse_validators(pubkey, false)
+                .await
+                .unwrap();
+        }
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_import_error(keystores.len()));
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_delete_error(keystores.len()));
+    })
+}
+
+#[test]
+fn import_keystores_wrong_password() {
+    run_test(|tester| async move {
+        let num_keystores = 4;
+        let (keystores, correct_passwords): (Vec<_>, Vec<_>) = (0..num_keystores)
+            .map(|_| {
+                let password = random_password_string();
+                (new_keystore(password.clone()), password)
+            })
+            .unzip();
+
+        // First import with some incorrect passwords.
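+        // Even-indexed keystores get a freshly generated (wrong) password; odd-indexed
+        // ones keep their correct password.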
+        let incorrect_passwords = (0..num_keystores)
+            .map(|i| {
+                if i % 2 == 0 {
+                    random_password_string()
+                } else {
+                    correct_passwords[i].clone()
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: incorrect_passwords.clone(),
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+
+        let expected_statuses = (0..num_keystores).map(|i| {
+            if i % 2 == 0 {
+                ImportKeystoreStatus::Error
+            } else {
+                ImportKeystoreStatus::Imported
+            }
+        });
+        check_import_response(&import_res, expected_statuses);
+
+        // Import again with the correct passwords and check that the statuses are as expected.
+        let correct_import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: correct_passwords.clone(),
+            slashing_protection: None,
+        };
+        let import_res = tester
+            .client
+            .post_keystores(&correct_import_req)
+            .await
+            .unwrap();
+        let expected_statuses = (0..num_keystores).map(|i| {
+            if i % 2 == 0 {
+                ImportKeystoreStatus::Imported
+            } else {
+                ImportKeystoreStatus::Duplicate
+            }
+        });
+        check_import_response(&import_res, expected_statuses);
+
+        // Import one final time, at which point all keys should be duplicates.
+        let import_res = tester
+            .client
+            .post_keystores(&correct_import_req)
+            .await
+            .unwrap();
+        check_import_response(
+            &import_res,
+            (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate),
+        );
+    });
+}
+
+#[test]
+fn import_invalid_slashing_protection() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..3)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // Invalid slashing protection data with mismatched version and mismatched GVR.
+        let slashing_protection = Interchange {
+            metadata: InterchangeMetadata {
+                interchange_format_version: 0,
+                genesis_validators_root: Hash256::zero(),
+            },
+            data: vec![],
+        };
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone(); keystores.len()],
+                slashing_protection: Some(InterchangeJsonStr(slashing_protection)),
+            })
+            .await
+            .unwrap();
+
+        // All keystores should fail to import.
+        check_import_response(&import_res, all_import_error(keystores.len()));
+
+        // Check that GET lists none of the failed keystores.
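+        // None of the keystores from the failed import should have been added.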
+        let get_res = tester.client.get_keystores().await.unwrap();
+        check_get_response(&get_res, &[]);
+    })
+}
+
+fn all_indices(count: usize) -> Vec<usize> {
+    (0..count).collect()
+}
+
+#[test]
+fn migrate_all_with_slashing_protection() {
+    let n = 3;
+    generic_migration_test(
+        n,
+        vec![
+            (0, make_attestation(1, 2)),
+            (1, make_attestation(2, 3)),
+            (2, make_attestation(1, 2)),
+        ],
+        all_indices(n),
+        all_indices(n),
+        all_indices(n),
+        vec![
+            (0, make_attestation(1, 2), false),
+            (1, make_attestation(2, 3), false),
+            (2, make_attestation(1, 2), false),
+        ],
+    );
+}
+
+#[test]
+fn migrate_some_with_slashing_protection() {
+    let n = 3;
+    generic_migration_test(
+        n,
+        vec![
+            (0, make_attestation(1, 2)),
+            (1, make_attestation(2, 3)),
+            (2, make_attestation(1, 2)),
+        ],
+        vec![0, 1],
+        vec![0, 1],
+        vec![0, 1],
+        vec![
+            (0, make_attestation(1, 2), false),
+            (1, make_attestation(2, 3), false),
+            (0, make_attestation(2, 3), true),
+            (1, make_attestation(3, 4), true),
+        ],
+    );
+}
+
+#[test]
+fn migrate_some_missing_slashing_protection() {
+    let n = 3;
+    generic_migration_test(
+        n,
+        vec![
+            (0, make_attestation(1, 2)),
+            (1, make_attestation(2, 3)),
+            (2, make_attestation(1, 2)),
+        ],
+        vec![0, 1],
+        vec![0],
+        vec![0, 1],
+        vec![
+            (0, make_attestation(1, 2), false),
+            (1, make_attestation(2, 3), true),
+            (0, make_attestation(2, 3), true),
+        ],
+    );
+}
+
+#[test]
+fn migrate_some_extra_slashing_protection() {
+    let n = 3;
+    generic_migration_test(
+        n,
+        vec![
+            (0, make_attestation(1, 2)),
+            (1, make_attestation(2, 3)),
+            (2, make_attestation(1, 2)),
+        ],
+        all_indices(n),
+        all_indices(n),
+        vec![0, 1],
+        vec![
+            (0, make_attestation(1, 2), false),
+            (1, make_attestation(2, 3), false),
+            (0, make_attestation(2, 3), true),
+            (1, make_attestation(3, 4), true),
+            (2, make_attestation(2, 3), false),
+        ],
+    );
+}
+
+/// Run a test that creates some validators on one VC, and then migrates them to a second VC.
+///
+/// All indices given are in the range 0..`num_validators`. They are *not* validator indices in the
+/// ordinary sense.
+///
+/// Parameters:
+///
+/// - `num_validators`: the total number of validators to create
+/// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)`
+/// - `delete_indices`: validators to delete from the first VC
+/// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should
+///   be a subset of `delete_indices` or the test will panic.
+/// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`.
+/// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool
+///   indicates whether the signing should be successful.
+fn generic_migration_test(
+    num_validators: usize,
+    first_vc_attestations: Vec<(usize, Attestation<E>)>,
+    delete_indices: Vec<usize>,
+    slashing_protection_indices: Vec<usize>,
+    import_indices: Vec<usize>,
+    second_vc_attestations: Vec<(usize, Attestation<E>, bool)>,
+) {
+    run_dual_vc_test(move |tester1, tester2| async move {
+        // Create the validators on VC1.
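+        // Each keystore gets its own distinct random password.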
+        let (keystores, passwords): (Vec<_>, Vec<_>) = (0..num_validators)
+            .map(|_| {
+                let password = random_password_string();
+                (new_keystore(password.clone()), password)
+            })
+            .unzip();
+
+        let import_res = tester1
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: passwords.clone(),
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // Sign attestations on VC1.
+        for (validator_index, mut attestation) in first_vc_attestations {
+            let public_key = keystore_pubkey(&keystores[validator_index]);
+            let current_epoch = attestation.data.target.epoch;
+            tester1
+                .validator_store
+                .sign_attestation(public_key, 0, &mut attestation, current_epoch)
+                .await
+                .unwrap();
+        }
+
+        // Delete the selected keys from VC1.
+        let delete_res = tester1
+            .client
+            .delete_keystores(&DeleteKeystoresRequest {
+                pubkeys: delete_indices
+                    .iter()
+                    .copied()
+                    .map(|i| keystore_pubkey(&keystores[i]))
+                    .collect(),
+            })
+            .await
+            .unwrap();
+        check_delete_response(&delete_res, all_deleted(delete_indices.len()));
+
+        // Check that slashing protection data was returned for all selected validators.
+        assert_eq!(
+            delete_res.slashing_protection.data.len(),
+            delete_indices.len()
+        );
+        for &i in &delete_indices {
+            assert!(delete_res
+                .slashing_protection
+                .data
+                .iter()
+                .any(|interchange_data| interchange_data.pubkey == keystore_pubkey(&keystores[i])));
+        }
+
+        // Filter slashing protection according to `slashing_protection_indices`.
+        let mut slashing_protection = delete_res.slashing_protection;
+        let data = std::mem::take(&mut slashing_protection.data);
+
+        for &i in &slashing_protection_indices {
+            let pubkey = keystore_pubkey(&keystores[i]);
+            slashing_protection.data.push(
+                data.iter()
+                    .find(|interchange_data| interchange_data.pubkey == pubkey)
+                    .expect("slashing protection indices should be subset of deleted")
+                    .clone(),
+            );
+        }
+        assert_eq!(
+            slashing_protection.data.len(),
+            slashing_protection_indices.len()
+        );
+
+        // Import into the 2nd VC using the slashing protection data.
+        let import_res = tester2
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: import_indices
+                    .iter()
+                    .copied()
+                    .map(|i| keystores[i].clone())
+                    .collect(),
+                passwords: import_indices
+                    .iter()
+                    .copied()
+                    .map(|i| passwords[i].clone())
+                    .collect(),
+                slashing_protection: Some(InterchangeJsonStr(slashing_protection)),
+            })
+            .await
+            .unwrap();
+        check_import_response(&import_res, all_imported(import_indices.len()));
+
+        // Sign attestations on the second VC.
+        for (validator_index, mut attestation, should_succeed) in second_vc_attestations {
+            let public_key = keystore_pubkey(&keystores[validator_index]);
+            let current_epoch = attestation.data.target.epoch;
+            match tester2
+                .validator_store
+                .sign_attestation(public_key, 0, &mut attestation, current_epoch)
+                .await
+            {
+                Ok(()) => assert!(should_succeed),
+                Err(e) => assert!(!should_succeed, "{:?}", e),
+            }
+        }
+    });
+}
+
+#[test]
+fn delete_keystores_twice() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // 1. Import all keystores.
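+        // All keystores share one password, repeated once per entry in the request.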
+        let import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // 2. Delete all.
+        let delete_req = DeleteKeystoresRequest {
+            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+        };
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_deleted(keystores.len()));
+
+        // 3. Delete again.
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_not_active(keystores.len()));
+    })
+}
+
+#[test]
+fn delete_nonexistent_keystores() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // Delete all.
+        let delete_req = DeleteKeystoresRequest {
+            pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+        };
+        let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap();
+        check_delete_response(&delete_res, all_not_found(keystores.len()));
+    })
+}
+
+fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation<E> {
+    Attestation {
+        aggregation_bits: BitList::with_capacity(
+            <E as EthSpec>::MaxValidatorsPerCommittee::to_usize(),
+        )
+        .unwrap(),
+        data: AttestationData {
+            source: Checkpoint {
+                epoch: Epoch::new(source_epoch),
+                root: Hash256::from_low_u64_le(source_epoch),
+            },
+            target: Checkpoint {
+                epoch: Epoch::new(target_epoch),
+                root: Hash256::from_low_u64_le(target_epoch),
+            },
+            ..AttestationData::default()
+        },
+        signature: AggregateSignature::empty(),
+    }
+}
+
+#[test]
+fn delete_concurrent_with_signing() {
+    let runtime = build_runtime();
+    let num_keys = 8;
+    let num_signing_threads = 8;
+    let num_attestations = 100;
+    let num_delete_threads = 8;
+    let num_delete_attempts = 100;
+    let delete_prob = 0.01;
+
+    assert!(
+        num_keys % num_signing_threads == 0,
+        "num_keys should be divisible by num threads for simplicity"
+    );
+
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(async {
+        let tester = ApiTester::new(weak_runtime).await;
+
+        // Generate a lot of keys and import them.
+        let password = random_password_string();
+        let keystores = (0..num_keys)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+        let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>();
+
+        let import_res = tester
+            .client
+            .post_keystores(&ImportKeystoresRequest {
+                keystores: keystores.clone(),
+                passwords: vec![password.clone(); keystores.len()],
+                slashing_protection: None,
+            })
+            .await
+            .unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // Start several threads signing attestations at sequential epochs.
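+        // Each thread signs with its own disjoint slice of the keys, so signing threads
+        // never contend with each other, only with the concurrent deletions below.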
+        let mut join_handles = vec![];
+
+        for thread_index in 0..num_signing_threads {
+            let keys_per_thread = num_keys / num_signing_threads;
+            let validator_store = tester.validator_store.clone();
+            let thread_pubkeys = all_pubkeys
+                [thread_index * keys_per_thread..(thread_index + 1) * keys_per_thread]
+                .to_vec();
+
+            let handle = runtime.spawn(async move {
+                for j in 0..num_attestations {
+                    let mut att = make_attestation(j, j + 1);
+                    for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() {
+                        let _ = validator_store
+                            .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1))
+                            .await;
+                    }
+                }
+            });
+            join_handles.push(handle);
+        }
+
+        // Concurrently, delete each validator one at a time. Store the slashing protection
+        // data so we can ensure it doesn't change after a key is exported.
+        let mut delete_handles = vec![];
+        for _ in 0..num_delete_threads {
+            let client = tester.client.clone();
+            let all_pubkeys = all_pubkeys.clone();
+
+            let handle = runtime.spawn(async move {
+                let mut rng = SmallRng::from_entropy();
+
+                let mut slashing_protection = vec![];
+                for _ in 0..num_delete_attempts {
+                    let to_delete = all_pubkeys
+                        .iter()
+                        .filter(|_| rng.gen_bool(delete_prob))
+                        .copied()
+                        .collect::<Vec<_>>();
+
+                    if !to_delete.is_empty() {
+                        let delete_res = client
+                            .delete_keystores(&DeleteKeystoresRequest { pubkeys: to_delete })
+                            .await
+                            .unwrap();
+
+                        for status in delete_res.data.iter() {
+                            assert_ne!(status.status, DeleteKeystoreStatus::Error);
+                        }
+
+                        slashing_protection.push(delete_res.slashing_protection);
+                    }
+                }
+                slashing_protection
+            });
+
+            delete_handles.push(handle);
+        }
+
+        // Collect slashing protection.
+        let mut slashing_protection_map = HashMap::new();
+        let collected_slashing_protection = futures::future::join_all(delete_handles).await;
+
+        for interchange in collected_slashing_protection
+            .into_iter()
+            .map(Result::unwrap)
+            .flatten()
+        {
+            for validator_data in interchange.data {
+                slashing_protection_map
+                    .entry(validator_data.pubkey)
+                    .and_modify(|existing| {
+                        assert_eq!(
+                            *existing, validator_data,
+                            "slashing protection data changed after first export"
+                        )
+                    })
+                    .or_insert(validator_data);
+            }
+        }
+
+        futures::future::join_all(join_handles).await
+    });
+}
+
+#[test]
+fn delete_then_reimport() {
+    run_test(|tester| async move {
+        let password = random_password_string();
+        let keystores = (0..2)
+            .map(|_| new_keystore(password.clone()))
+            .collect::<Vec<_>>();
+
+        // 1. Import all keystores.
+        let import_req = ImportKeystoresRequest {
+            keystores: keystores.clone(),
+            passwords: vec![password.clone(); keystores.len()],
+            slashing_protection: None,
+        };
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+
+        // 2. Delete all.
+        let delete_res = tester
+            .client
+            .delete_keystores(&DeleteKeystoresRequest {
+                pubkeys: keystores.iter().map(keystore_pubkey).collect(),
+            })
+            .await
+            .unwrap();
+        check_delete_response(&delete_res, all_deleted(keystores.len()));
+
+        // 3. Re-import
+        let import_res = tester.client.post_keystores(&import_req).await.unwrap();
+        check_import_response(&import_res, all_imported(keystores.len()));
+    })
+}
diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs
index 72e651f7d..5900c8e56 100644
--- a/validator_client/src/initialized_validators.rs
+++ b/validator_client/src/initialized_validators.rs
@@ -14,19 +14,22 @@ use account_utils::{
     },
     ZeroizeString,
 };
+use eth2::lighthouse_vc::std_types::DeleteKeystoreStatus;
 use eth2_keystore::Keystore;
 use lighthouse_metrics::set_gauge;
 use lockfile::{Lockfile, LockfileError};
+use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};
 use reqwest::{Certificate, Client, Error as ReqwestError};
 use slog::{debug, error, info, warn, Logger};
 use std::collections::{HashMap, HashSet};
-use std::fs::File;
+use std::fs::{self, File};
 use std::io::{self, Read};
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
 use types::{Graffiti, Keypair, PublicKey, PublicKeyBytes};
 use url::{ParseError, Url};
+use validator_dir::Builder as ValidatorDirBuilder;
 
 use crate::key_cache;
 use crate::key_cache::KeyCache;
@@ -67,6 +70,10 @@ pub enum Error {
     UnableToSaveDefinitions(validator_definitions::Error),
     /// It is not legal to try and initialize a disabled validator definition.
     UnableToInitializeDisabledValidator,
+    /// There was an error while deleting a keystore file.
+    UnableToDeleteKeystore(PathBuf, io::Error),
+    /// There was an error while deleting a validator dir.
+    UnableToDeleteValidatorDir(PathBuf, io::Error),
     /// There was an error reading from stdin.
     UnableToReadPasswordFromUser(String),
     /// There was an error running a tokio async task.
@@ -83,6 +90,8 @@ pub enum Error {
     InvalidWeb3SignerRootCertificateFile(io::Error),
     InvalidWeb3SignerRootCertificate(ReqwestError),
     UnableToBuildWeb3SignerClient(ReqwestError),
+    /// Unable to apply an action to a validator because it is using a remote signer.
+    InvalidActionOnRemoteValidator,
 }
 
 impl From<LockfileError> for Error {
@@ -101,12 +110,15 @@ pub struct InitializedValidator {
 
 impl InitializedValidator {
     /// Return a reference to this validator's lockfile if it has one.
-    pub fn keystore_lockfile(&self) -> Option<&Lockfile> {
+    pub fn keystore_lockfile(&self) -> Option<MappedMutexGuard<Lockfile>> {
         match self.signing_method.as_ref() {
             SigningMethod::LocalKeystore {
                 ref voting_keystore_lockfile,
                 ..
-            } => Some(voting_keystore_lockfile),
+            } => MutexGuard::try_map(voting_keystore_lockfile.lock(), |option_lockfile| {
+                option_lockfile.as_mut()
+            })
+            .ok(),
             // Web3Signer validators do not have any lockfiles.
             SigningMethod::Web3Signer { .. } => None,
         }
@@ -213,7 +225,7 @@ impl InitializedValidator {
                 let lockfile_path = get_lockfile_path(&voting_keystore_path)
                     .ok_or_else(|| Error::BadVotingKeystorePath(voting_keystore_path.clone()))?;
 
-                let voting_keystore_lockfile = Lockfile::new(lockfile_path)?;
+                let voting_keystore_lockfile = Mutex::new(Some(Lockfile::new(lockfile_path)?));
 
                 SigningMethod::LocalKeystore {
                     voting_keystore_path,
@@ -381,6 +393,25 @@ impl InitializedValidators {
             .map(|v| v.signing_method.clone())
     }
 
+    /// Add a validator definition to `self`, replacing any disabled definition with the same
+    /// voting public key.
+    ///
+    /// The on-disk representation of the validator definitions & the key cache will both be
+    /// updated.
+    pub async fn add_definition_replace_disabled(
+        &mut self,
+        def: ValidatorDefinition,
+    ) -> Result<(), Error> {
+        // Drop any disabled definitions with the same public key.
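+        // Enabled definitions with a matching key are deliberately kept; `add_definition`
+        // below is responsible for rejecting such duplicates.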
+        let delete_def = |existing_def: &ValidatorDefinition| {
+            !existing_def.enabled && existing_def.voting_public_key == def.voting_public_key
+        };
+        self.definitions.retain(|def| !delete_def(def));
+
+        // Add the definition.
+        self.add_definition(def).await
+    }
+
     /// Add a validator definition to `self`, overwriting the on-disk representation of `self`.
     pub async fn add_definition(&mut self, def: ValidatorDefinition) -> Result<(), Error> {
         if self
@@ -403,6 +434,91 @@ impl InitializedValidators {
         Ok(())
     }
 
+    /// Delete the validator definition and keystore for `pubkey`.
+    ///
+    /// The delete is carried out in stages so that the filesystem is never left in an inconsistent
+    /// state, even in case of errors or crashes.
+    pub async fn delete_definition_and_keystore(
+        &mut self,
+        pubkey: &PublicKey,
+    ) -> Result<DeleteKeystoreStatus, Error> {
+        // 1. Disable the validator definition.
+        //
+        // We disable before removing so that in case of a crash the auto-discovery mechanism
+        // won't re-activate the keystore.
+        if let Some(def) = self
+            .definitions
+            .as_mut_slice()
+            .iter_mut()
+            .find(|def| &def.voting_public_key == pubkey)
+        {
+            if def.signing_definition.is_local_keystore() {
+                def.enabled = false;
+                self.definitions
+                    .save(&self.validators_dir)
+                    .map_err(Error::UnableToSaveDefinitions)?;
+            } else {
+                return Err(Error::InvalidActionOnRemoteValidator);
+            }
+        } else {
+            return Ok(DeleteKeystoreStatus::NotFound);
+        }
+
+        // 2. Delete from `self.validators`, which holds the signing method.
+        // Delete the keystore files.
+        if let Some(initialized_validator) = self.validators.remove(&pubkey.compress()) {
+            if let SigningMethod::LocalKeystore {
+                ref voting_keystore_path,
+                ref voting_keystore_lockfile,
+                ref voting_keystore,
+                ..
+            } = *initialized_validator.signing_method
+            {
+                // Drop the lock file so that it may be deleted. This is particularly important on
+                // Windows where the lockfile will fail to be deleted if it is still open.
+                drop(voting_keystore_lockfile.lock().take());
+
+                self.delete_keystore_or_validator_dir(voting_keystore_path, voting_keystore)?;
+            }
+        }
+
+        // 3. Delete from validator definitions entirely.
+        self.definitions
+            .retain(|def| &def.voting_public_key != pubkey);
+        self.definitions
+            .save(&self.validators_dir)
+            .map_err(Error::UnableToSaveDefinitions)?;
+
+        Ok(DeleteKeystoreStatus::Deleted)
+    }
+
+    /// Attempt to delete the voting keystore file, or its entire validator directory.
+    ///
+    /// Some parts of the VC assume the existence of a validator based on the existence of a
+    /// directory in the validators dir named like a public key.
+    fn delete_keystore_or_validator_dir(
+        &self,
+        voting_keystore_path: &Path,
+        voting_keystore: &Keystore,
+    ) -> Result<(), Error> {
+        // If the parent directory is a `ValidatorDir` within `self.validators_dir`, then
+        // delete the entire directory so that it may be recreated if the keystore is
+        // re-imported.
+        if let Some(validator_dir) = voting_keystore_path.parent() {
+            if validator_dir
+                == ValidatorDirBuilder::get_dir_path(&self.validators_dir, voting_keystore)
+            {
+                fs::remove_dir_all(validator_dir)
+                    .map_err(|e| Error::UnableToDeleteValidatorDir(validator_dir.into(), e))?;
+                return Ok(());
+            }
+        }
+        // Otherwise just delete the keystore file.
+        fs::remove_file(voting_keystore_path)
+            .map_err(|e| Error::UnableToDeleteKeystore(voting_keystore_path.into(), e))?;
+        Ok(())
+    }
+
     /// Returns a slice of all defined validators (regardless of their enabled state).
     pub fn validator_definitions(&self) -> &[ValidatorDefinition] {
         self.definitions.as_slice()
@@ -456,17 +572,24 @@ impl InitializedValidators {
 
     /// Tries to decrypt the key cache.
     ///
-    /// Returns `Ok(true)` if decryption was successful, `Ok(false)` if it couldn't get decrypted
-    /// and an error if a needed password couldn't get extracted.
+    /// Returns the decrypted cache if decryption was successful, or an error if a required password
+    /// wasn't provided and couldn't be read interactively.
     ///
+    /// In the case that the cache contains UUIDs for unknown validator definitions then it cannot
+    /// be decrypted and will be replaced by a new empty cache.
+    ///
+    /// The mutable `key_stores` argument will be used to accelerate decryption by bypassing
+    /// filesystem accesses for keystores that are already known. In the case that a keystore
+    /// from the validator definitions is not yet in this map, it will be loaded from disk and
+    /// inserted into the map.
     async fn decrypt_key_cache(
         &self,
         mut cache: KeyCache,
         key_stores: &mut HashMap<PathBuf, Keystore>,
     ) -> Result<KeyCache, Error> {
-        //read relevant key_stores
+        // Read relevant key stores from the filesystem.
         let mut definitions_map = HashMap::new();
-        for def in self.definitions.as_slice() {
+        for def in self.definitions.as_slice().iter().filter(|def| def.enabled) {
             match &def.signing_definition {
                 SigningDefinition::LocalKeystore {
                     voting_keystore_path,
@@ -487,10 +610,11 @@ impl InitializedValidators {
         //check if all paths are in the definitions_map
         for uuid in cache.uuids() {
             if !definitions_map.contains_key(uuid) {
-                warn!(
+                debug!(
                     self.log,
-                    "Unknown uuid in cache";
-                    "uuid" => format!("{}", uuid)
+                    "Resetting the key cache";
+                    "keystore_uuid" => %uuid,
+                    "reason" => "impossible to decrypt due to missing keystore",
                 );
                 return Ok(KeyCache::new());
             }
@@ -547,7 +671,7 @@ impl InitializedValidators {
     /// A validator is considered "already known" and skipped if the public key is already known.
     /// I.e., if there are two different definitions with the same public key then the second will
     /// be ignored.
-    async fn update_validators(&mut self) -> Result<(), Error> {
+    pub(crate) async fn update_validators(&mut self) -> Result<(), Error> {
         //use key cache if available
         let mut key_stores = HashMap::new();
 
diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs
index 7f28700a2..3c12ac1e6 100644
--- a/validator_client/src/signing_method.rs
+++ b/validator_client/src/signing_method.rs
@@ -6,6 +6,7 @@ use crate::http_metrics::metrics;
 use eth2_keystore::Keystore;
 use lockfile::Lockfile;
+use parking_lot::Mutex;
 use reqwest::Client;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -75,7 +76,7 @@ pub enum SigningMethod {
     /// A validator that is defined by an EIP-2335 keystore on the local filesystem.
     LocalKeystore {
         voting_keystore_path: PathBuf,
-        voting_keystore_lockfile: Lockfile,
+        voting_keystore_lockfile: Mutex<Option<Lockfile>>,
         voting_keystore: Keystore,
         voting_keypair: Arc<Keypair>,
     },
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index d7efa806a..884b97694 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -6,7 +6,9 @@ use crate::{
 };
 use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
 use parking_lot::{Mutex, RwLock};
-use slashing_protection::{NotSafe, Safe, SlashingDatabase};
+use slashing_protection::{
+    interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase,
+};
 use slog::{crit, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::iter::FromIterator;
@@ -183,7 +185,7 @@ impl ValidatorStore {
 
         self.validators
             .write()
-            .add_definition(validator_def.clone())
+            .add_definition_replace_disabled(validator_def.clone())
             .await
             .map_err(|e| format!("Unable to add definition: {:?}", e))?;
 
@@ -693,6 +695,48 @@ impl ValidatorStore {
         Ok(SignedContributionAndProof { message, signature })
     }
 
+    pub fn import_slashing_protection(
+        &self,
+        interchange: Interchange,
+    ) -> Result<(), InterchangeError> {
+        self.slashing_protection
+            .import_interchange_info(interchange, self.genesis_validators_root)?;
+        Ok(())
+    }
+
+    /// Export slashing protection data while also disabling the given keys in the database.
+    ///
+    /// If any key is unknown to the slashing protection database it will be silently omitted
+    /// from the result. It is the caller's responsibility to check whether all keys provided
+    /// had data returned for them.
+    pub fn export_slashing_protection_for_keys(
+        &self,
+        pubkeys: &[PublicKeyBytes],
+    ) -> Result<Interchange, InterchangeError> {
+        self.slashing_protection.with_transaction(|txn| {
+            let known_pubkeys = pubkeys
+                .iter()
+                .filter_map(|pubkey| {
+                    let validator_id = self
+                        .slashing_protection
+                        .get_validator_id_ignoring_status(txn, pubkey)
+                        .ok()?;
+
+                    Some(
+                        self.slashing_protection
+                            .update_validator_status(txn, validator_id, false)
+                            .map(|()| *pubkey),
+                    )
+                })
+                .collect::<Result<Vec<PublicKeyBytes>, _>>()?;
+            self.slashing_protection.export_interchange_info_in_txn(
+                self.genesis_validators_root,
+                Some(&known_pubkeys),
+                txn,
+            )
+        })
+    }
+
     /// Prune the slashing protection database so that it remains performant.
     ///
     /// This function will only do actual pruning periodically, so it should usually be