Unify execution layer endpoints (#3214)

## Issue Addressed

Resolves #3069 

## Proposed Changes

Unify the `eth1-endpoints` and `execution-endpoints` flags in a backwards-compatible way, as described in https://github.com/sigp/lighthouse/issues/3069#issuecomment-1134219221

Users have two options (see the sketch below):
1. Use multiple non-authenticated execution endpoints for deposit processing pre-merge.
2. Use a single JWT-authenticated execution endpoint for both the execution layer and deposit processing post-merge.
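
In the `eth1` service this is modeled by the new `Eth1Endpoint` type. The following is a condensed sketch of the enum added in `eth1::service` further down in this diff (derives and helper methods omitted):

```rust
use std::path::PathBuf;
use sensitive_url::SensitiveUrl;

/// Either a single JWT-authenticated engine endpoint, or a list of
/// unauthenticated endpoints (with fallback) for pre-merge deposit processing.
pub enum Eth1Endpoint {
    Auth {
        endpoint: SensitiveUrl,
        jwt_path: PathBuf,
        jwt_id: Option<String>,
        jwt_version: Option<String>,
    },
    NoAuth(Vec<SensitiveUrl>),
}
```

`Service::init_endpoints` then builds either a single JWT-authenticated `HttpJsonRpc` client or one client per fallback URL from this value.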

Related https://github.com/sigp/lighthouse/issues/3118

To enable JWT-authenticated deposit processing, this PR removes the calls to `net_version`, since the `net` namespace is not exposed by the authenticated JSON-RPC server in execution clients.
Moving away from `networkId` is a good step in my opinion, as it doesn't provide us with any added guarantees over `chainId`. See https://github.com/ethereum/consensus-specs/issues/2163 and https://github.com/sigp/lighthouse/issues/2115
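
For reference, the endpoint health check now only compares the advertised `eth_chainId` against the configured chain id. Below is a minimal, hypothetical sketch of that logic, assuming the `HttpJsonRpc` and `Eth1Id` types introduced further down; the real check in `eth1::service::endpoint_state` also records metrics and logs warnings:

```rust
use std::time::Duration;

// Illustrative helper only; the production code lives in
// `eth1::service::endpoint_state` and uses `execution_layer::http::HttpJsonRpc`.
async fn check_chain_id(
    client: &HttpJsonRpc,
    config_chain_id: &Eth1Id,
    timeout: Duration,
) -> Result<(), String> {
    let chain_id = client
        .get_chain_id(timeout)
        .await
        .map_err(|e| format!("failed to fetch eth_chainId: {:?}", e))?;
    // Nodes that are still syncing report a chain id of 0; Lighthouse does not
    // treat that as a wrong chain id, it simply waits for it to change.
    if chain_id != Eth1Id::Custom(0) && &chain_id != config_chain_id {
        return Err(format!(
            "wrong chain id: expected {:?}, got {:?}",
            config_chain_id, chain_id
        ));
    }
    Ok(())
}
```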


Co-authored-by: Paul Hauner <paul@paulhauner.com>
Pawan Dhananjay 2022-06-29 09:07:09 +00:00
parent 53b2b500db
commit 5de00b7ee8
31 changed files with 1113 additions and 992 deletions

Cargo.lock (generated)

@@ -744,6 +744,7 @@ dependencies = [
"sensitive_url",
"serde",
"serde_derive",
+"serde_yaml",
"slasher",
"slasher_service",
"slog",
@@ -753,7 +754,6 @@ dependencies = [
"time 0.3.9",
"timer",
"tokio",
-"toml",
"types",
]
@@ -1530,6 +1530,7 @@ dependencies = [
"eth2",
"eth2_ssz",
"eth2_ssz_derive",
+"execution_layer",
"fallback",
"futures",
"hex",
@@ -1541,12 +1542,12 @@ dependencies = [
"sensitive_url",
"serde",
"serde_json",
+"serde_yaml",
"slog",
"sloggers",
"state_processing",
"task_executor",
"tokio",
-"toml",
"tree_hash",
"types",
"web3",
@@ -1877,8 +1878,9 @@ dependencies = [
"async-trait",
"bytes",
"environment",
-"eth1",
+"eth2",
"eth2_serde_utils",
+"eth2_ssz",
"eth2_ssz_types",
"ethers-core",
"exit-future",
@@ -1896,6 +1898,7 @@ dependencies = [
"serde_json",
"slog",
"slot_clock",
+"state_processing",
"task_executor",
"tempfile",
"tokio",
@@ -3397,6 +3400,7 @@ dependencies = [
"directory",
"env_logger 0.9.0",
"environment",
+"eth1",
"eth2_hashing",
"eth2_network_config",
"futures",
@@ -5636,6 +5640,7 @@ dependencies = [
"env_logger 0.9.0",
"eth1",
"eth1_test_rig",
+"execution_layer",
"futures",
"node_test_rig",
"parking_lot 0.12.1",


@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"
[dev-dependencies]
-toml = "0.5.6"
+serde_yaml = "0.8.13"
[dependencies]
beacon_chain = { path = "../beacon_chain" }


@@ -198,7 +198,8 @@ mod tests {
#[test]
fn serde() {
let config = Config::default();
-let serialized = toml::to_string(&config).expect("should serde encode default config");
-toml::from_str::<Config>(&serialized).expect("should serde decode default config");
+let serialized =
+serde_yaml::to_string(&config).expect("should serde encode default config");
+serde_yaml::from_str::<Config>(&serialized).expect("should serde decode default config");
}
}


@@ -6,13 +6,14 @@ edition = "2021"
[dev-dependencies]
eth1_test_rig = { path = "../../testing/eth1_test_rig" }
-toml = "0.5.6"
+serde_yaml = "0.8.13"
web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] }
sloggers = { version = "2.1.1", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
[dependencies]
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
+execution_layer = { path = "../execution_layer" }
futures = "0.3.7"
serde_json = "1.0.58"
serde = { version = "1.0.116", features = ["derive"] }


@@ -1,4 +1,4 @@
-use crate::DepositLog;
+use execution_layer::http::deposit_log::DepositLog;
use ssz_derive::{Decode, Encode};
use state_processing::common::DepositDataTree;
use std::cmp::Ordering;
@@ -297,12 +297,37 @@ impl DepositCache {
#[cfg(test)]
pub mod tests {
use super::*;
-use crate::deposit_log::tests::EXAMPLE_LOG;
-use crate::http::Log;
+use execution_layer::http::deposit_log::Log;
use types::{EthSpec, MainnetEthSpec};
pub const TREE_DEPTH: usize = 32;
/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
pub const EXAMPLE_LOG: &[u8] = &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82,
248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239,
50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206,
30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213,
119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193,
187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189,
149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221,
18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
fn example_log() -> DepositLog {
let spec = MainnetEthSpec::default_spec();


@@ -1,107 +0,0 @@
use super::http::Log;
use ssz::Decode;
use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message;
use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};
pub use eth2::lighthouse::DepositLog;
/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
/// event bytes are formatted according to the Ethereum ABI.
const PUBKEY_START: usize = 192;
const PUBKEY_LEN: usize = 48;
const CREDS_START: usize = PUBKEY_START + 64 + 32;
const CREDS_LEN: usize = 32;
const AMOUNT_START: usize = CREDS_START + 32 + 32;
const AMOUNT_LEN: usize = 8;
const SIG_START: usize = AMOUNT_START + 32 + 32;
const SIG_LEN: usize = 96;
const INDEX_START: usize = SIG_START + 96 + 32;
const INDEX_LEN: usize = 8;
impl Log {
/// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result<DepositLog, String> {
let bytes = &self.data;
let pubkey = bytes
.get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN)
.ok_or("Insufficient bytes for pubkey")?;
let withdrawal_credentials = bytes
.get(CREDS_START..CREDS_START + CREDS_LEN)
.ok_or("Insufficient bytes for withdrawal credential")?;
let amount = bytes
.get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN)
.ok_or("Insufficient bytes for amount")?;
let signature = bytes
.get(SIG_START..SIG_START + SIG_LEN)
.ok_or("Insufficient bytes for signature")?;
let index = bytes
.get(INDEX_START..INDEX_START + INDEX_LEN)
.ok_or("Insufficient bytes for index")?;
let deposit_data = DepositData {
pubkey: PublicKeyBytes::from_ssz_bytes(pubkey)
.map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?,
withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials)
.map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?,
amount: u64::from_ssz_bytes(amount)
.map_err(|e| format!("Invalid amount ssz: {:?}", e))?,
signature: SignatureBytes::from_ssz_bytes(signature)
.map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
};
let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec)
.map_or(false, |(public_key, signature, msg)| {
signature.verify(&public_key, msg)
});
Ok(DepositLog {
deposit_data,
block_number: self.block_number,
index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?,
signature_is_valid,
})
}
}
#[cfg(test)]
pub mod tests {
use crate::http::Log;
use types::{EthSpec, MainnetEthSpec};
/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
pub const EXAMPLE_LOG: &[u8] = &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82,
248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239,
50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206,
30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213,
119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193,
187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189,
149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221,
18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
#[test]
fn can_parse_example_log() {
let log = Log {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
log.to_deposit_log(&MainnetEthSpec::default_spec())
.expect("should decode log");
}
}


@@ -1,489 +0,0 @@
//! Provides a very minimal set of functions for interfacing with the eth2 deposit contract via an
//! eth1 HTTP JSON-RPC endpoint.
//!
//! All remote functions return a future (i.e., are async).
//!
//! Does not use a web3 library, instead it uses `reqwest` (`hyper`) to call the remote endpoint
//! and `serde` to decode the response.
//!
//! ## Note
//!
//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants.
use futures::future::TryFutureExt;
use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode};
use sensitive_url::SensitiveUrl;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::fmt;
use std::ops::Range;
use std::str::FromStr;
use std::time::Duration;
use types::Hash256;
/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")`
pub const DEPOSIT_EVENT_TOPIC: &str =
"0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5";
/// `keccak("get_deposit_root()")[0..4]`
pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f";
/// `keccak("get_deposit_count()")[0..4]`
pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130";
/// Number of bytes in deposit contract deposit root response.
pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96;
/// Number of bytes in deposit contract deposit root (value only).
pub const DEPOSIT_ROOT_BYTES: usize = 32;
/// This error is returned during a `chainId` call by Geth.
pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block";
/// Represents an eth1 chain/network id.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum Eth1Id {
Goerli,
Mainnet,
Custom(u64),
}
/// Used to identify a block when querying the Eth1 node.
#[derive(Clone, Copy)]
pub enum BlockQuery {
Number(u64),
Latest,
}
/// Represents an error received from a remote procecdure call.
#[derive(Debug, Serialize, Deserialize)]
pub enum RpcError {
NoResultField,
Eip155Error,
InvalidJson(String),
Error(String),
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RpcError::NoResultField => write!(f, "No result field in response"),
RpcError::Eip155Error => write!(f, "Not synced past EIP-155"),
RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e),
RpcError::Error(s) => write!(f, "{}", s),
}
}
}
impl From<RpcError> for String {
fn from(e: RpcError) -> String {
e.to_string()
}
}
impl Into<u64> for Eth1Id {
fn into(self) -> u64 {
match self {
Eth1Id::Mainnet => 1,
Eth1Id::Goerli => 5,
Eth1Id::Custom(id) => id,
}
}
}
impl From<u64> for Eth1Id {
fn from(id: u64) -> Self {
let into = |x: Eth1Id| -> u64 { x.into() };
match id {
id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet,
id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli,
id => Eth1Id::Custom(id),
}
}
}
impl FromStr for Eth1Id {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse::<u64>()
.map(Into::into)
.map_err(|e| format!("Failed to parse eth1 network id {}", e))
}
}
/// Get the eth1 network id of the given endpoint.
pub async fn get_network_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result<Eth1Id, String> {
let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?;
Eth1Id::from_str(
response_result_or_error(&response_body)?
.as_str()
.ok_or("Data was not string")?,
)
}
/// Get the eth1 chain id of the given endpoint.
pub async fn get_chain_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result<Eth1Id, String> {
let response_body: String =
send_rpc_request(endpoint, "eth_chainId", json!([]), timeout).await?;
match response_result_or_error(&response_body) {
Ok(chain_id) => {
hex_to_u64_be(chain_id.as_str().ok_or("Data was not string")?).map(|id| id.into())
}
// Geth returns this error when it's syncing lower blocks. Simply map this into `0` since
// Lighthouse does not raise errors for `0`, it simply waits for it to change.
Err(RpcError::Eip155Error) => Ok(Eth1Id::Custom(0)),
Err(e) => Err(e.to_string()),
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct Block {
pub hash: Hash256,
pub timestamp: u64,
pub number: u64,
}
/// Returns the current block number.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
pub async fn get_block_number(endpoint: &SensitiveUrl, timeout: Duration) -> Result<u64, String> {
let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?;
hex_to_u64_be(
response_result_or_error(&response_body)
.map_err(|e| format!("eth_blockNumber failed: {}", e))?
.as_str()
.ok_or("Data was not string")?,
)
.map_err(|e| format!("Failed to get block number: {}", e))
}
/// Gets a block hash by block number.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
pub async fn get_block(
endpoint: &SensitiveUrl,
query: BlockQuery,
timeout: Duration,
) -> Result<Block, String> {
let query_param = match query {
BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
BlockQuery::Latest => "latest".to_string(),
};
let params = json!([
query_param,
false // do not return full tx objects.
]);
let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?;
let response = response_result_or_error(&response_body)
.map_err(|e| format!("eth_getBlockByNumber failed: {}", e))?;
let hash: Vec<u8> = hex_to_bytes(
response
.get("hash")
.ok_or("No hash for block")?
.as_str()
.ok_or("Block hash was not string")?,
)?;
let hash: Hash256 = if hash.len() == 32 {
Hash256::from_slice(&hash)
} else {
return Err(format!("Block has was not 32 bytes: {:?}", hash));
};
let timestamp = hex_to_u64_be(
response
.get("timestamp")
.ok_or("No timestamp for block")?
.as_str()
.ok_or("Block timestamp was not string")?,
)?;
let number = hex_to_u64_be(
response
.get("number")
.ok_or("No number for block")?
.as_str()
.ok_or("Block number was not string")?,
)?;
if number <= usize::max_value() as u64 {
Ok(Block {
hash,
timestamp,
number,
})
} else {
Err(format!("Block number {} is larger than a usize", number))
}
.map_err(|e| format!("Failed to get block number: {}", e))
}
/// Returns the value of the `get_deposit_count()` call at the given `address` for the given
/// `block_number`.
///
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
pub async fn get_deposit_count(
endpoint: &SensitiveUrl,
address: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<u64>, String> {
let result = call(
endpoint,
address,
DEPOSIT_COUNT_FN_SIGNATURE,
block_number,
timeout,
)
.await?;
match result {
None => Err("Deposit root response was none".to_string()),
Some(bytes) => {
if bytes.is_empty() {
Ok(None)
} else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES {
let mut array = [0; 8];
array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]);
Ok(Some(u64::from_le_bytes(array)))
} else {
Err(format!(
"Deposit count response was not {} bytes: {:?}",
DEPOSIT_COUNT_RESPONSE_BYTES, bytes
))
}
}
}
}
/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`.
///
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
pub async fn get_deposit_root(
endpoint: &SensitiveUrl,
address: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<Hash256>, String> {
let result = call(
endpoint,
address,
DEPOSIT_ROOT_FN_SIGNATURE,
block_number,
timeout,
)
.await?;
match result {
None => Err("Deposit root response was none".to_string()),
Some(bytes) => {
if bytes.is_empty() {
Ok(None)
} else if bytes.len() == DEPOSIT_ROOT_BYTES {
Ok(Some(Hash256::from_slice(&bytes)))
} else {
Err(format!(
"Deposit root response was not {} bytes: {:?}",
DEPOSIT_ROOT_BYTES, bytes
))
}
}
}
}
/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed
/// `hex_data`.
///
/// Returns bytes, if any.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
async fn call(
endpoint: &SensitiveUrl,
address: &str,
hex_data: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<Vec<u8>>, String> {
let params = json! ([
{
"to": address,
"data": hex_data,
},
format!("0x{:x}", block_number)
]);
let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?;
match response_result_or_error(&response_body) {
Ok(result) => {
let hex = result
.as_str()
.map(|s| s.to_string())
.ok_or("'result' value was not a string")?;
Ok(Some(hex_to_bytes(&hex)?))
}
// It's valid for `eth_call` to return without a result.
Err(RpcError::NoResultField) => Ok(None),
Err(e) => Err(format!("eth_call failed: {}", e)),
}
}
/// A reduced set of fields from an Eth1 contract log.
#[derive(Debug, PartialEq, Clone)]
pub struct Log {
pub(crate) block_number: u64,
pub(crate) data: Vec<u8>,
}
/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given
/// `block_height_range`.
///
/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not.
///
/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`.
pub async fn get_deposit_logs_in_range(
endpoint: &SensitiveUrl,
address: &str,
block_height_range: Range<u64>,
timeout: Duration,
) -> Result<Vec<Log>, String> {
let params = json! ([{
"address": address,
"topics": [DEPOSIT_EVENT_TOPIC],
"fromBlock": format!("0x{:x}", block_height_range.start),
"toBlock": format!("0x{:x}", block_height_range.end),
}]);
let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?;
response_result_or_error(&response_body)
.map_err(|e| format!("eth_getLogs failed: {}", e))?
.as_array()
.cloned()
.ok_or("'result' value was not an array")?
.into_iter()
.map(|value| {
let block_number = value
.get("blockNumber")
.ok_or("No block number field in log")?
.as_str()
.ok_or("Block number was not string")?;
let data = value
.get("data")
.ok_or("No block number field in log")?
.as_str()
.ok_or("Data was not string")?;
Ok(Log {
block_number: hex_to_u64_be(block_number)?,
data: hex_to_bytes(data)?,
})
})
.collect::<Result<Vec<Log>, String>>()
.map_err(|e| format!("Failed to get logs in range: {}", e))
}
/// Sends an RPC request to `endpoint`, using a POST with the given `body`.
///
/// Tries to receive the response and parse the body as a `String`.
pub async fn send_rpc_request(
endpoint: &SensitiveUrl,
method: &str,
params: Value,
timeout: Duration,
) -> Result<String, String> {
let body = json! ({
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": 1
})
.to_string();
// Note: it is not ideal to create a new client for each request.
//
// A better solution would be to create some struct that contains a built client and pass it
// around (similar to the `web3` crate's `Transport` structs).
let response = ClientBuilder::new()
.timeout(timeout)
.build()
.expect("The builder should always build a client")
.post(endpoint.full.clone())
.header(CONTENT_TYPE, "application/json")
.body(body)
.send()
.map_err(|e| format!("Request failed: {:?}", e))
.await?;
if response.status() != StatusCode::OK {
return Err(format!(
"Response HTTP status was not 200 OK: {}.",
response.status()
));
};
let encoding = response
.headers()
.get(CONTENT_TYPE)
.ok_or("No content-type header in response")?
.to_str()
.map(|s| s.to_string())
.map_err(|e| format!("Failed to parse content-type header: {}", e))?;
response
.bytes()
.map_err(|e| format!("Failed to receive body: {:?}", e))
.await
.and_then(move |bytes| match encoding.as_str() {
"application/json" => Ok(bytes),
"application/json; charset=utf-8" => Ok(bytes),
other => Err(format!("Unsupported encoding: {}", other)),
})
.map(|bytes| String::from_utf8_lossy(&bytes).into_owned())
.map_err(|e| format!("Failed to receive body: {:?}", e))
}
/// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`.
fn response_result_or_error(response: &str) -> Result<Value, RpcError> {
let json = serde_json::from_str::<Value>(response)
.map_err(|e| RpcError::InvalidJson(e.to_string()))?;
if let Some(error) = json.get("error").and_then(|e| e.get("message")) {
let error = error.to_string();
if error.contains(EIP155_ERROR_STR) {
Err(RpcError::Eip155Error)
} else {
Err(RpcError::Error(error))
}
} else {
json.get("result").cloned().ok_or(RpcError::NoResultField)
}
}
/// Parses a `0x`-prefixed, **big-endian** hex string as a u64.
///
/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian.
/// Therefore, this function is only useful for numbers encoded by the JSON RPC.
///
/// E.g., `0x01 == 1`
fn hex_to_u64_be(hex: &str) -> Result<u64, String> {
u64::from_str_radix(strip_prefix(hex)?, 16)
.map_err(|e| format!("Failed to parse hex as u64: {:?}", e))
}
/// Parses a `0x`-prefixed, big-endian hex string as bytes.
///
/// E.g., `0x0102 == vec![1, 2]`
fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
hex::decode(strip_prefix(hex)?).map_err(|e| format!("Failed to parse hex as bytes: {:?}", e))
}
/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present.
fn strip_prefix(hex: &str) -> Result<&str, String> {
if let Some(stripped) = hex.strip_prefix("0x") {
Ok(stripped)
} else {
Err("Hex string did not start with `0x`".to_string())
}
}


@@ -3,17 +3,15 @@ extern crate lazy_static;
mod block_cache;
mod deposit_cache;
-mod deposit_log;
-pub mod http;
mod inner;
mod metrics;
mod service;
pub use block_cache::{BlockCache, Eth1Block};
pub use deposit_cache::DepositCache;
-pub use deposit_log::DepositLog;
+pub use execution_layer::http::deposit_log::DepositLog;
pub use inner::SszEth1Cache;
pub use service::{
-BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_CHAIN_ID,
-DEFAULT_NETWORK_ID,
+BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service,
+DEFAULT_CHAIN_ID,
};


@@ -2,12 +2,13 @@ use crate::metrics;
use crate::{
block_cache::{BlockCache, Error as BlockCacheError, Eth1Block},
deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError},
-http::{
-get_block, get_block_number, get_chain_id, get_deposit_logs_in_range, get_network_id,
-BlockQuery, Eth1Id,
-},
inner::{DepositUpdater, Inner},
};
+use execution_layer::auth::Auth;
+use execution_layer::http::{
+deposit_methods::{BlockQuery, Eth1Id},
+HttpJsonRpc,
+};
use fallback::{Fallback, FallbackError};
use futures::future::TryFutureExt;
use parking_lot::{RwLock, RwLockReadGuard};
@@ -17,14 +18,13 @@ use slog::{crit, debug, error, info, trace, warn, Logger};
use std::fmt::Debug;
use std::future::Future;
use std::ops::{Range, RangeInclusive};
+use std::path::PathBuf;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock as TRwLock;
use tokio::time::{interval_at, Duration, Instant};
use types::{ChainSpec, EthSpec, Unsigned};
-/// Indicates the default eth1 network id we use for the deposit contract.
-pub const DEFAULT_NETWORK_ID: Eth1Id = Eth1Id::Goerli;
/// Indicates the default eth1 chain id we use for the deposit contract.
pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli;
/// Indicates the default eth1 endpoint.
@@ -63,14 +63,14 @@ pub enum EndpointError {
type EndpointState = Result<(), EndpointError>;
pub struct EndpointWithState {
-endpoint: SensitiveUrl,
+client: HttpJsonRpc,
state: TRwLock<Option<EndpointState>>,
}
impl EndpointWithState {
-pub fn new(endpoint: SensitiveUrl) -> Self {
+pub fn new(client: HttpJsonRpc) -> Self {
Self {
-endpoint,
+client,
state: TRwLock::new(None),
}
}
@@ -89,7 +89,6 @@ async fn get_state(endpoint: &EndpointWithState) -> Option<EndpointState> {
/// is not usable.
pub struct EndpointsCache {
pub fallback: Fallback<EndpointWithState>,
-pub config_network_id: Eth1Id,
pub config_chain_id: Eth1Id,
pub log: Logger,
}
@@ -107,20 +106,14 @@ impl EndpointsCache {
}
crate::metrics::inc_counter_vec(
&crate::metrics::ENDPOINT_REQUESTS,
-&[&endpoint.endpoint.to_string()],
+&[&endpoint.client.to_string()],
);
-let state = endpoint_state(
-&endpoint.endpoint,
-&self.config_network_id,
-&self.config_chain_id,
-&self.log,
-)
-.await;
+let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await;
*value = Some(state.clone());
if state.is_err() {
crate::metrics::inc_counter_vec(
&crate::metrics::ENDPOINT_ERRORS,
-&[&endpoint.endpoint.to_string()],
+&[&endpoint.client.to_string()],
);
crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0);
} else {
@@ -136,7 +129,7 @@
func: F,
) -> Result<(O, usize), FallbackError<SingleEndpointError>>
where
-F: Fn(&'a SensitiveUrl) -> R,
+F: Fn(&'a HttpJsonRpc) -> R,
R: Future<Output = Result<O, SingleEndpointError>>,
{
let func = &func;
@@ -144,12 +137,12 @@
.first_success(|endpoint| async move {
match self.state(endpoint).await {
Ok(()) => {
-let endpoint_str = &endpoint.endpoint.to_string();
+let endpoint_str = &endpoint.client.to_string();
crate::metrics::inc_counter_vec(
&crate::metrics::ENDPOINT_REQUESTS,
&[endpoint_str],
);
-match func(&endpoint.endpoint).await {
+match func(&endpoint.client).await {
Ok(t) => Ok(t),
Err(t) => {
crate::metrics::inc_counter_vec(
@@ -186,8 +179,7 @@ impl EndpointsCache {
/// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and
/// chain id. Otherwise it returns `Err`.
async fn endpoint_state(
-endpoint: &SensitiveUrl,
-config_network_id: &Eth1Id,
+endpoint: &HttpJsonRpc,
config_chain_id: &Eth1Id,
log: &Logger,
) -> EndpointState {
@@ -200,21 +192,9 @@ async fn endpoint_state(
);
EndpointError::RequestFailed(e)
};
-let network_id = get_network_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS))
-.await
-.map_err(error_connecting)?;
-if &network_id != config_network_id {
-warn!(
-log,
-"Invalid eth1 network id on endpoint. Please switch to correct network id";
-"endpoint" => %endpoint,
-"action" => "trying fallbacks",
-"expected" => format!("{:?}",config_network_id),
-"received" => format!("{:?}",network_id),
-);
-return Err(EndpointError::WrongNetworkId);
-}
-let chain_id = get_chain_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS))
+let chain_id = endpoint
+.get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS))
.await
.map_err(error_connecting)?;
// Eth1 nodes return chain_id = 0 if the node is not synced
@@ -253,7 +233,7 @@ pub enum HeadType {
/// Returns the head block and the new block ranges relevant for deposits and the block cache
/// from the given endpoint.
async fn get_remote_head_and_new_block_ranges(
-endpoint: &SensitiveUrl,
+endpoint: &HttpJsonRpc,
service: &Service,
node_far_behind_seconds: u64,
) -> Result<
@@ -315,12 +295,12 @@
/// Returns the range of new block numbers to be considered for the given head type from the given
/// endpoint.
async fn relevant_new_block_numbers_from_endpoint(
-endpoint: &SensitiveUrl,
+endpoint: &HttpJsonRpc,
service: &Service,
head_type: HeadType,
) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
-let remote_highest_block =
-get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
+let remote_highest_block = endpoint
+.get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
.map_err(SingleEndpointError::GetBlockNumberFailed)
.await?;
service.relevant_new_block_numbers(remote_highest_block, None, head_type)
@@ -379,14 +359,41 @@ pub struct DepositCacheUpdateOutcome {
pub logs_imported: usize,
}
/// Supports either one authenticated jwt JSON-RPC endpoint **or**
/// multiple non-authenticated endpoints with fallback.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum Eth1Endpoint {
Auth {
endpoint: SensitiveUrl,
jwt_path: PathBuf,
jwt_id: Option<String>,
jwt_version: Option<String>,
},
NoAuth(Vec<SensitiveUrl>),
}
impl Eth1Endpoint {
fn len(&self) -> usize {
match &self {
Self::Auth { .. } => 1,
Self::NoAuth(urls) => urls.len(),
}
}
pub fn get_endpoints(&self) -> Vec<SensitiveUrl> {
match &self {
Self::Auth { endpoint, .. } => vec![endpoint.clone()],
Self::NoAuth(endpoints) => endpoints.clone(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint.
-pub endpoints: Vec<SensitiveUrl>,
+pub endpoints: Eth1Endpoint,
/// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract.
pub deposit_contract_address: String,
-/// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet).
-pub network_id: Eth1Id,
/// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet).
pub chain_id: Eth1Id,
/// Defines the first block that the `DepositCache` will start searching for deposit logs.
@@ -461,10 +468,9 @@ impl Config {
impl Default for Config {
fn default() -> Self {
Self {
-endpoints: vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT)
-.expect("The default Eth1 endpoint must always be a valid URL.")],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT)
+.expect("The default Eth1 endpoint must always be a valid URL.")]),
deposit_contract_address: "0x0000000000000000000000000000000000000000".into(),
-network_id: DEFAULT_NETWORK_ID,
chain_id: DEFAULT_CHAIN_ID,
deposit_contract_deploy_block: 1,
lowest_cached_block_number: 1,
@@ -673,27 +679,45 @@ impl Service {
}
/// Builds a new `EndpointsCache` with empty states.
-pub fn init_endpoints(&self) -> Arc<EndpointsCache> {
+pub fn init_endpoints(&self) -> Result<Arc<EndpointsCache>, String> {
let endpoints = self.config().endpoints.clone();
-let config_network_id = self.config().network_id.clone();
let config_chain_id = self.config().chain_id.clone();
let servers = match endpoints {
Eth1Endpoint::Auth {
jwt_path,
endpoint,
jwt_id,
jwt_version,
} => {
let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version)
.map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?;
vec![HttpJsonRpc::new_with_auth(endpoint, auth)
.map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?]
}
Eth1Endpoint::NoAuth(urls) => urls
.into_iter()
.map(|url| {
HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e))
})
.collect::<Result<_, _>>()?,
};
let new_cache = Arc::new(EndpointsCache {
-fallback: Fallback::new(endpoints.into_iter().map(EndpointWithState::new).collect()),
-config_network_id,
+fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()),
config_chain_id,
log: self.log.clone(),
});
let mut endpoints_cache = self.inner.endpoints_cache.write();
*endpoints_cache = Some(new_cache.clone());
-new_cache
+Ok(new_cache)
}
/// Returns the cached `EndpointsCache` if it exists or builds a new one.
-pub fn get_endpoints(&self) -> Arc<EndpointsCache> {
+pub fn get_endpoints(&self) -> Result<Arc<EndpointsCache>, String> {
let endpoints_cache = self.inner.endpoints_cache.read();
if let Some(cache) = endpoints_cache.clone() {
-cache
+Ok(cache)
} else {
drop(endpoints_cache);
self.init_endpoints()
@@ -711,7 +735,7 @@
pub async fn update(
&self,
) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> {
-let endpoints = self.get_endpoints();
+let endpoints = self.get_endpoints()?;
// Reset the state of any endpoints which have errored so their state can be redetermined.
endpoints.reset_errorred_endpoints().await;
@@ -738,7 +762,7 @@
}
}
}
-endpoints.fallback.map_format_error(|s| &s.endpoint, e)
+endpoints.fallback.map_format_error(|s| &s.client, e)
};
let process_err = |e: Error| match &e {
@@ -988,9 +1012,9 @@
*/
let block_range_ref = &block_range;
let logs = endpoints
-.first_success(|e| async move {
-get_deposit_logs_in_range(
-e,
+.first_success(|endpoint| async move {
+endpoint
+.get_deposit_logs_in_range(
deposit_contract_address_ref,
block_range_ref.clone(),
Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
@@ -1305,7 +1329,7 @@ fn relevant_block_range(
///
/// Performs three async calls to an Eth1 HTTP JSON RPC endpoint.
async fn download_eth1_block(
-endpoint: &SensitiveUrl,
+endpoint: &HttpJsonRpc,
cache: Arc<Inner>,
block_number_opt: Option<u64>,
) -> Result<Eth1Block, SingleEndpointError> {
@@ -1326,8 +1350,8 @@ async fn download_eth1_block(
});
// Performs a `get_blockByNumber` call to an eth1 node.
-let http_block = get_block(
-endpoint,
+let http_block = endpoint
+.get_block(
block_number_opt
.map(BlockQuery::Number)
.unwrap_or_else(|| BlockQuery::Latest),
@@ -1359,8 +1383,8 @@ mod tests {
#[test]
fn serde_serialize() {
let serialized =
-toml::to_string(&Config::default()).expect("Should serde encode default config");
-toml::from_str::<Config>(&serialized).expect("Should serde decode default config");
+serde_yaml::to_string(&Config::default()).expect("Should serde encode default config");
+serde_yaml::from_str::<Config>(&serialized).expect("Should serde decode default config");
}
#[test]


@@ -1,9 +1,9 @@
#![cfg(test)]
use environment::{Environment, EnvironmentBuilder};
-use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log};
-use eth1::{Config, Service};
-use eth1::{DepositCache, DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID};
+use eth1::{Config, Eth1Endpoint, Service};
+use eth1::{DepositCache, DEFAULT_CHAIN_ID};
use eth1_test_rig::GanacheEth1Instance;
+use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log};
use merkle_proof::verify_merkle_proof;
use sensitive_url::SensitiveUrl;
use slog::Logger;
@@ -51,37 +51,37 @@ fn random_deposit_data() -> DepositData {
}
/// Blocking operation to get the deposit logs from the `deposit_contract`.
-async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range<u64>) -> Vec<Log> {
-get_deposit_logs_in_range(
-&SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-&eth1.deposit_contract.address(),
-range,
-timeout(),
-)
+async fn blocking_deposit_logs(
+client: &HttpJsonRpc,
+eth1: &GanacheEth1Instance,
+range: Range<u64>,
+) -> Vec<Log> {
+client
+.get_deposit_logs_in_range(&eth1.deposit_contract.address(), range, timeout())
.await
.expect("should get logs")
}
/// Blocking operation to get the deposit root from the `deposit_contract`.
-async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option<Hash256> {
-get_deposit_root(
-&SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-&eth1.deposit_contract.address(),
-block_number,
-timeout(),
-)
+async fn blocking_deposit_root(
+client: &HttpJsonRpc,
+eth1: &GanacheEth1Instance,
+block_number: u64,
+) -> Option<Hash256> {
+client
+.get_deposit_root(&eth1.deposit_contract.address(), block_number, timeout())
.await
.expect("should get deposit root")
}
/// Blocking operation to get the deposit count from the `deposit_contract`.
-async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option<u64> {
-get_deposit_count(
-&SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-&eth1.deposit_contract.address(),
-block_number,
-timeout(),
-)
+async fn blocking_deposit_count(
+client: &HttpJsonRpc,
+eth1: &GanacheEth1Instance,
+block_number: u64,
+) -> Option<u64> {
+client
+.get_deposit_count(&eth1.deposit_contract.address(), block_number, timeout())
.await
.expect("should get deposit count")
}
@@ -95,7 +95,7 @@ async fn get_block_number(web3: &Web3<Http>) -> u64 {
}
async fn new_ganache_instance() -> Result<GanacheEth1Instance, String> {
-GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await
+GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await
}
mod eth1_cache {
@@ -117,7 +117,10 @@ mod eth1_cache {
let initial_block_number = get_block_number(&web3).await;
let config = Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number,
follow_distance,
@@ -146,7 +149,7 @@
eth1.ganache.evm_mine().await.expect("should mine block");
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
@@ -198,7 +201,10 @@
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
follow_distance: 0,
@@ -215,7 +221,7 @@
eth1.ganache.evm_mine().await.expect("should mine block")
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
@@ -252,7 +258,10 @@
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
follow_distance: 0,
@@ -267,7 +276,7 @@
for _ in 0..cache_len / 2 {
eth1.ganache.evm_mine().await.expect("should mine block")
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
.await
@@ -302,7 +311,10 @@
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
follow_distance: 0,
@@ -316,7 +328,7 @@
eth1.ganache.evm_mine().await.expect("should mine block")
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
futures::try_join!(
service.update_deposit_cache(None, &endpoints),
service.update_deposit_cache(None, &endpoints)
@@ -354,7 +366,10 @@ mod deposit_tree {
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: start_block,
follow_distance: 0,
@@ -374,7 +389,7 @@
.expect("should perform a deposit");
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
@@ -434,7 +449,10 @@
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: start_block,
lowest_cached_block_number: start_block,
@@ -454,7 +472,7 @@
.expect("should perform a deposit");
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
futures::try_join!(
service.update_deposit_cache(None, &endpoints),
service.update_deposit_cache(None, &endpoints)
@@ -484,6 +502,8 @@ mod deposit_tree {
let mut deposit_roots = vec![];
let mut deposit_counts = vec![];
+let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
// Perform deposits to the smart contract, recording it's state along the way.
for deposit in &deposits {
deposit_contract
@@ -492,12 +512,12 @@
.expect("should perform a deposit");
let block_number = get_block_number(&web3).await;
deposit_roots.push(
-blocking_deposit_root(&eth1, block_number)
+blocking_deposit_root(&client, &eth1, block_number)
.await
.expect("should get root if contract exists"),
);
deposit_counts.push(
-blocking_deposit_count(&eth1, block_number)
+blocking_deposit_count(&client, &eth1, block_number)
.await
.expect("should get count if contract exists"),
);
@@ -507,7 +527,7 @@
// Pull all the deposit logs from the contract.
let block_number = get_block_number(&web3).await;
-let logs: Vec<_> = blocking_deposit_logs(&eth1, 0..block_number)
+let logs: Vec<_> = blocking_deposit_logs(&client, &eth1, 0..block_number)
.await
.iter()
.map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log"))
@@ -570,14 +590,10 @@ mod deposit_tree {
/// Tests for the base HTTP requests and response handlers.
mod http {
use super::*;
-use eth1::http::BlockQuery;
-async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block {
-eth1::http::get_block(
-&SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-BlockQuery::Number(block_number),
-timeout(),
-)
+async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block {
+client
+.get_block(BlockQuery::Number(block_number), timeout())
.await
.expect("should get block number")
}
@@ -590,17 +606,18 @@
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
+let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
let block_number = get_block_number(&web3).await;
-let logs = blocking_deposit_logs(&eth1, 0..block_number).await;
+let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
assert_eq!(logs.len(), 0);
-let mut old_root = blocking_deposit_root(&eth1, block_number).await;
-let mut old_block = get_block(&eth1, block_number).await;
+let mut old_root = blocking_deposit_root(&client, &eth1, block_number).await;
+let mut old_block = get_block(&client, block_number).await;
let mut old_block_number = block_number;
assert_eq!(
-blocking_deposit_count(&eth1, block_number).await,
+blocking_deposit_count(&client, &eth1, block_number).await,
Some(0),
"should have deposit count zero"
);
@@ -618,18 +635,18 @@
// Check the logs.
let block_number = get_block_number(&web3).await;
-let logs = blocking_deposit_logs(&eth1, 0..block_number).await;
+let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
assert_eq!(logs.len(), i, "the number of logs should be as expected");
// Check the deposit count.
assert_eq!(
-blocking_deposit_count(&eth1, block_number).await,
+blocking_deposit_count(&client, &eth1, block_number).await,
Some(i as u64),
"should have a correct deposit count"
);
// Check the deposit root.
-let new_root = blocking_deposit_root(&eth1, block_number).await;
+let new_root = blocking_deposit_root(&client, &eth1, block_number).await;
assert_ne!(
new_root, old_root,
"deposit root should change with each deposit"
@@ -637,7 +654,7 @@
old_root = new_root;
// Check the block hash.
-let new_block = get_block(&eth1, block_number).await;
+let new_block = get_block(&client, block_number).await;
assert_ne!(
new_block.hash, old_block.hash,
"block hash should change with each deposit"
@@ -689,7 +706,10 @@ mod fast {
let now = get_block_number(&web3).await;
let service = Service::new(
Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now,
lowest_cached_block_number: now,
@@ -700,6 +720,7 @@
log,
MainnetEthSpec::default_spec(),
);
+let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
let n = 10;
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
for deposit in &deposits {
@@ -711,7 +732,7 @@
eth1.ganache.evm_mine().await.expect("should mine block");
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
.await
@@ -723,8 +744,9 @@
);
for block_num in 0..=get_block_number(&web3).await {
-let expected_deposit_count = blocking_deposit_count(&eth1, block_num).await;
-let expected_deposit_root = blocking_deposit_root(&eth1, block_num).await;
+let expected_deposit_count =
+blocking_deposit_count(&client, &eth1, block_num).await;
+let expected_deposit_root = blocking_deposit_root(&client, &eth1, block_num).await;
let deposit_count = service
.deposits()
@@ -765,7 +787,10 @@ mod persist {
let now = get_block_number(&web3).await;
let config = Config {
-endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+eth1.endpoint().as_str(),
+)
+.unwrap()]),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now,
lowest_cached_block_number: now,
@@ -783,7 +808,7 @@
.expect("should perform a deposit");
}
-let endpoints = service.init_endpoints();
+let endpoints = service.init_endpoints().unwrap();
service
.update_deposit_cache(None, &endpoints)
.await
@@ -874,10 +899,10 @@ mod fallbacks {
let service = Service::new(
Config {
-endpoints: vec![
+endpoints: Eth1Endpoint::NoAuth(vec![
SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
-],
+]),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number,
follow_distance: 0,
@@ -909,82 +934,13 @@ mod fallbacks {
.await;
}
#[tokio::test]
async fn test_fallback_when_wrong_network_id() {
async {
let log = null_logger();
let correct_network_id: u64 = DEFAULT_NETWORK_ID.into();
let wrong_network_id = correct_network_id + 1;
let endpoint1 = GanacheEth1Instance::new(wrong_network_id, DEFAULT_CHAIN_ID.into())
.await
.expect("should start eth1 environment");
let endpoint2 = new_ganache_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &endpoint2.deposit_contract;
let initial_block_number = get_block_number(&endpoint2.web3()).await;
// Create some blocks and then consume them, performing the test `rounds` times.
let new_blocks = 4;
for _ in 0..new_blocks {
endpoint1
.ganache
.evm_mine()
.await
.expect("should mine block");
endpoint2
.ganache
.evm_mine()
.await
.expect("should mine block");
}
//additional blocks for endpoint1 to be able to distinguish
for _ in 0..new_blocks {
endpoint1
.ganache
.evm_mine()
.await
.expect("should mine block");
}
let service = Service::new(
Config {
endpoints: vec![
SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
],
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number,
follow_distance: 0,
..Config::default()
},
log.clone(),
MainnetEthSpec::default_spec(),
);
let endpoint1_block_number = get_block_number(&endpoint1.web3()).await;
let endpoint2_block_number = get_block_number(&endpoint2.web3()).await;
assert!(endpoint2_block_number < endpoint1_block_number);
//the call will fallback to endpoint2
service.update().await.expect("should update deposit cache");
assert_eq!(
service.deposits().read().last_processed_block.unwrap(),
endpoint2_block_number
);
}
.await;
}
#[tokio::test] #[tokio::test]
async fn test_fallback_when_wrong_chain_id() { async fn test_fallback_when_wrong_chain_id() {
async { async {
let log = null_logger(); let log = null_logger();
let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into(); let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into();
let wrong_chain_id = correct_chain_id + 1; let wrong_chain_id = correct_chain_id + 1;
let endpoint1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), wrong_chain_id) let endpoint1 = GanacheEth1Instance::new(wrong_chain_id)
.await .await
.expect("should start eth1 environment"); .expect("should start eth1 environment");
let endpoint2 = new_ganache_instance() let endpoint2 = new_ganache_instance()
@ -1021,10 +977,10 @@ mod fallbacks {
let service = Service::new( let service = Service::new(
Config { Config {
endpoints: vec![ endpoints: Eth1Endpoint::NoAuth(vec![
SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
], ]),
deposit_contract_address: deposit_contract.address(), deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number, lowest_cached_block_number: initial_block_number,
follow_distance: 0, follow_distance: 0,
@ -1076,10 +1032,10 @@ mod fallbacks {
let service = Service::new( let service = Service::new(
Config { Config {
endpoints: vec![ endpoints: Eth1Endpoint::NoAuth(vec![
SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
], ]),
deposit_contract_address: deposit_contract.address(), deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: initial_block_number, lowest_cached_block_number: initial_block_number,
follow_distance: 0, follow_distance: 0,

View File

@ -16,14 +16,16 @@ reqwest = { version = "0.11.0", features = ["json","stream"] }
eth2_serde_utils = "0.1.1" eth2_serde_utils = "0.1.1"
serde_json = "1.0.58" serde_json = "1.0.58"
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
eth1 = { path = "../eth1" }
warp = { version = "0.3.2", features = ["tls"] } warp = { version = "0.3.2", features = ["tls"] }
jsonwebtoken = "8" jsonwebtoken = "8"
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
bytes = "1.1.0" bytes = "1.1.0"
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }
hex = "0.4.2" hex = "0.4.2"
eth2_ssz = "0.4.1"
eth2_ssz_types = "0.2.2" eth2_ssz_types = "0.2.2"
eth2 = { path = "../../common/eth2" }
state_processing = { path = "../../consensus/state_processing" }
lru = "0.7.1" lru = "0.7.1"
exit-future = "0.2.0" exit-future = "0.2.0"
tree_hash = "0.4.1" tree_hash = "0.4.1"

View File

@ -1,7 +1,7 @@
use crate::engines::ForkChoiceState; use crate::engines::ForkChoiceState;
use async_trait::async_trait; use async_trait::async_trait;
use eth1::http::RpcError;
pub use ethers_core::types::Transaction; pub use ethers_core::types::Transaction;
use http::deposit_methods::RpcError;
pub use json_structures::TransitionConfigurationV1; pub use json_structures::TransitionConfigurationV1;
use reqwest::StatusCode; use reqwest::StatusCode;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};

View File

@ -1,3 +1,5 @@
use std::path::PathBuf;
use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header}; use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header};
use rand::Rng; use rand::Rng;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -13,6 +15,7 @@ pub const JWT_SECRET_LENGTH: usize = 32;
pub enum Error { pub enum Error {
JWT(jsonwebtoken::errors::Error), JWT(jsonwebtoken::errors::Error),
InvalidToken, InvalidToken,
InvalidKey(String),
} }
impl From<jsonwebtoken::errors::Error> for Error { impl From<jsonwebtoken::errors::Error> for Error {
@ -57,6 +60,14 @@ impl JwtKey {
} }
} }
pub fn strip_prefix(s: &str) -> &str {
if let Some(stripped) = s.strip_prefix("0x") {
stripped
} else {
s
}
}
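// Illustrative check of the helper above: a hex-encoded secret is accepted
// with or without a `0x` prefix.
#[test]
fn strip_prefix_accepts_both_forms() {
    assert_eq!(strip_prefix("0xdeadbeef"), "deadbeef");
    assert_eq!(strip_prefix("deadbeef"), "deadbeef");
}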
/// Contains the JWT secret and claims parameters. /// Contains the JWT secret and claims parameters.
pub struct Auth { pub struct Auth {
key: EncodingKey, key: EncodingKey,
@ -73,6 +84,28 @@ impl Auth {
} }
} }
/// Create a new `Auth` struct given the path to the file containing the hex
/// encoded jwt key.
pub fn new_with_path(
jwt_path: PathBuf,
id: Option<String>,
clv: Option<String>,
) -> Result<Self, Error> {
std::fs::read_to_string(&jwt_path)
.map_err(|e| {
Error::InvalidKey(format!(
"Failed to read JWT secret file {:?}, error: {:?}",
jwt_path, e
))
})
.and_then(|ref s| {
let secret_bytes = hex::decode(strip_prefix(s.trim_end()))
.map_err(|e| Error::InvalidKey(format!("Invalid hex string: {:?}", e)))?;
let secret = JwtKey::from_slice(&secret_bytes).map_err(Error::InvalidKey)?;
Ok(Self::new(secret, id, clv))
})
}
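// A minimal usage sketch, assuming a hex-encoded secret exists at the
// hypothetical path `/tmp/jwt.hex`: load it into an `Auth` and mint a token
// for the engine API `Authorization: Bearer` header.
#[allow(dead_code)]
fn example_bearer_token() -> Result<String, Error> {
    let auth = Auth::new_with_path(PathBuf::from("/tmp/jwt.hex"), None, None)?;
    let token = auth.generate_token()?;
    Ok(format!("Bearer {}", token))
}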
/// Generate a JWT token with `claims.iat` set to current time. /// Generate a JWT token with `claims.iat` set to current time.
pub fn generate_token(&self) -> Result<String, Error> { pub fn generate_token(&self) -> Result<String, Error> {
let claims = self.generate_claims_at_timestamp(); let claims = self.generate_claims_at_timestamp();

View File

@ -3,15 +3,16 @@
use super::*; use super::*;
use crate::auth::Auth; use crate::auth::Auth;
use crate::json_structures::*; use crate::json_structures::*;
use eth1::http::EIP155_ERROR_STR;
use reqwest::header::CONTENT_TYPE; use reqwest::header::CONTENT_TYPE;
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde_json::json; use serde_json::json;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::time::Duration; use std::time::Duration;
use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock};
pub use deposit_log::{DepositLog, Log};
pub use reqwest::Client; pub use reqwest::Client;
const STATIC_ID: u32 = 1; const STATIC_ID: u32 = 1;
@ -48,6 +49,480 @@ pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2);
pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1";
pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2);
/// This error is returned during a `chainId` call by Geth.
pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block";
/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
pub mod deposit_log {
use ssz::Decode;
use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message;
use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};
pub use eth2::lighthouse::DepositLog;
/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
/// event bytes are formatted according to the Ethereum ABI.
const PUBKEY_START: usize = 192;
const PUBKEY_LEN: usize = 48;
const CREDS_START: usize = PUBKEY_START + 64 + 32;
const CREDS_LEN: usize = 32;
const AMOUNT_START: usize = CREDS_START + 32 + 32;
const AMOUNT_LEN: usize = 8;
const SIG_START: usize = AMOUNT_START + 32 + 32;
const SIG_LEN: usize = 96;
const INDEX_START: usize = SIG_START + 96 + 32;
const INDEX_LEN: usize = 8;
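// These offsets follow from the ABI encoding of an event with five dynamic
// `bytes` arguments: a 5 * 32 = 160 byte head of offset words, then each
// argument as a 32 byte length word followed by its data padded to a 32 byte
// boundary (e.g. the 48 byte pubkey occupies 64 bytes, hence
// CREDS_START = PUBKEY_START + 64 + 32).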
/// A reduced set of fields from an Eth1 contract log.
#[derive(Debug, PartialEq, Clone)]
pub struct Log {
pub block_number: u64,
pub data: Vec<u8>,
}
impl Log {
/// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result<DepositLog, String> {
let bytes = &self.data;
let pubkey = bytes
.get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN)
.ok_or("Insufficient bytes for pubkey")?;
let withdrawal_credentials = bytes
.get(CREDS_START..CREDS_START + CREDS_LEN)
.ok_or("Insufficient bytes for withdrawal credential")?;
let amount = bytes
.get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN)
.ok_or("Insufficient bytes for amount")?;
let signature = bytes
.get(SIG_START..SIG_START + SIG_LEN)
.ok_or("Insufficient bytes for signature")?;
let index = bytes
.get(INDEX_START..INDEX_START + INDEX_LEN)
.ok_or("Insufficient bytes for index")?;
let deposit_data = DepositData {
pubkey: PublicKeyBytes::from_ssz_bytes(pubkey)
.map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?,
withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials)
.map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?,
amount: u64::from_ssz_bytes(amount)
.map_err(|e| format!("Invalid amount ssz: {:?}", e))?,
signature: SignatureBytes::from_ssz_bytes(signature)
.map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
};
let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec)
.map_or(false, |(public_key, signature, msg)| {
signature.verify(&public_key, msg)
});
Ok(DepositLog {
deposit_data,
block_number: self.block_number,
index: u64::from_ssz_bytes(index)
.map_err(|e| format!("Invalid index ssz: {:?}", e))?,
signature_is_valid,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use types::{EthSpec, MainnetEthSpec};
/// The data from a deposit event, using the v0.8.3 version of the deposit contract.
pub const EXAMPLE_LOG: &[u8] = &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17,
3, 51, 6, 4, 158, 232, 82, 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77,
64, 213, 43, 52, 175, 154, 239, 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139,
120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, 30, 63, 215, 238, 113, 60,
132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, 119, 88, 51, 80, 101,
74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, 187, 22, 95, 4, 211,
245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, 149, 250, 251,
159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, 18, 113,
232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
#[test]
fn can_parse_example_log() {
let log = Log {
block_number: 42,
data: EXAMPLE_LOG.to_vec(),
};
log.to_deposit_log(&MainnetEthSpec::default_spec())
.expect("should decode log");
}
}
}
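// A hedged usage sketch (the helper name is illustrative): decode a raw
// deposit-contract log and reject it if the embedded BLS signature does not
// verify against the deposit message.
#[allow(dead_code)]
fn decode_and_check_deposit(
    raw: &Log,
    spec: &types::ChainSpec,
) -> Result<DepositLog, String> {
    let deposit_log = raw.to_deposit_log(spec)?;
    if !deposit_log.signature_is_valid {
        return Err(format!(
            "invalid deposit signature at index {}",
            deposit_log.index
        ));
    }
    Ok(deposit_log)
}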
/// Contains subset of the HTTP JSON-RPC methods used to query an execution node for
/// state of the deposit contract.
pub mod deposit_methods {
use super::Log;
use crate::{EngineApi, HttpJsonRpc};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::fmt;
use std::ops::Range;
use std::str::FromStr;
use std::time::Duration;
use types::Hash256;
/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")`
pub const DEPOSIT_EVENT_TOPIC: &str =
"0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5";
/// `keccak("get_deposit_root()")[0..4]`
pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f";
/// `keccak("get_deposit_count()")[0..4]`
pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130";
/// Number of bytes in deposit contract deposit count response.
pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96;
/// Number of bytes in deposit contract deposit root (value only).
pub const DEPOSIT_ROOT_BYTES: usize = 32;
/// Represents an eth1 chain/network id.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum Eth1Id {
Goerli,
Mainnet,
Custom(u64),
}
#[derive(Debug, PartialEq, Clone)]
pub struct Block {
pub hash: Hash256,
pub timestamp: u64,
pub number: u64,
}
/// Used to identify a block when querying the Eth1 node.
#[derive(Clone, Copy)]
pub enum BlockQuery {
Number(u64),
Latest,
}
impl Into<u64> for Eth1Id {
fn into(self) -> u64 {
match self {
Eth1Id::Mainnet => 1,
Eth1Id::Goerli => 5,
Eth1Id::Custom(id) => id,
}
}
}
impl From<u64> for Eth1Id {
fn from(id: u64) -> Self {
let into = |x: Eth1Id| -> u64 { x.into() };
match id {
id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet,
id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli,
id => Eth1Id::Custom(id),
}
}
}
impl FromStr for Eth1Id {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse::<u64>()
.map(Into::into)
.map_err(|e| format!("Failed to parse eth1 network id {}", e))
}
}
/// Represents an error received from a remote procedure call.
#[derive(Debug, Serialize, Deserialize)]
pub enum RpcError {
NoResultField,
Eip155Error,
InvalidJson(String),
Error(String),
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RpcError::NoResultField => write!(f, "No result field in response"),
RpcError::Eip155Error => write!(f, "Not synced past EIP-155"),
RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e),
RpcError::Error(s) => write!(f, "{}", s),
}
}
}
impl From<RpcError> for String {
fn from(e: RpcError) -> String {
e.to_string()
}
}
/// Parses a `0x`-prefixed, **big-endian** hex string as a u64.
///
/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian.
/// Therefore, this function is only useful for numbers encoded by the JSON RPC.
///
/// E.g., `0x01 == 1`
fn hex_to_u64_be(hex: &str) -> Result<u64, String> {
u64::from_str_radix(strip_prefix(hex)?, 16)
.map_err(|e| format!("Failed to parse hex as u64: {:?}", e))
}
/// Parses a `0x`-prefixed, big-endian hex string as bytes.
///
/// E.g., `0x0102 == vec![1, 2]`
fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
hex::decode(strip_prefix(hex)?)
.map_err(|e| format!("Failed to parse hex as bytes: {:?}", e))
}
/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present.
fn strip_prefix(hex: &str) -> Result<&str, String> {
if let Some(stripped) = hex.strip_prefix("0x") {
Ok(stripped)
} else {
Err("Hex string did not start with `0x`".to_string())
}
}
impl HttpJsonRpc<EngineApi> {
/// Get the eth1 chain id of the given endpoint.
pub async fn get_chain_id(&self, timeout: Duration) -> Result<Eth1Id, String> {
let chain_id: String = self
.rpc_request("eth_chainId", json!([]), timeout)
.await
.map_err(|e| format!("eth_chainId call failed {:?}", e))?;
hex_to_u64_be(chain_id.as_str()).map(|id| id.into())
}
/// Returns the current block number.
pub async fn get_block_number(&self, timeout: Duration) -> Result<u64, String> {
let response: String = self
.rpc_request("eth_blockNumber", json!([]), timeout)
.await
.map_err(|e| format!("eth_blockNumber call failed {:?}", e))?;
hex_to_u64_be(response.as_str())
.map_err(|e| format!("Failed to get block number: {}", e))
}
/// Gets a block hash by block number.
pub async fn get_block(
&self,
query: BlockQuery,
timeout: Duration,
) -> Result<Block, String> {
let query_param = match query {
BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
BlockQuery::Latest => "latest".to_string(),
};
let params = json!([
query_param,
false // do not return full tx objects.
]);
let response: Value = self
.rpc_request("eth_getBlockByNumber", params, timeout)
.await
.map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?;
let hash: Vec<u8> = hex_to_bytes(
response
.get("hash")
.ok_or("No hash for block")?
.as_str()
.ok_or("Block hash was not string")?,
)?;
let hash: Hash256 = if hash.len() == 32 {
Hash256::from_slice(&hash)
} else {
return Err(format!("Block hash was not 32 bytes: {:?}", hash));
};
let timestamp = hex_to_u64_be(
response
.get("timestamp")
.ok_or("No timestamp for block")?
.as_str()
.ok_or("Block timestamp was not string")?,
)?;
let number = hex_to_u64_be(
response
.get("number")
.ok_or("No number for block")?
.as_str()
.ok_or("Block number was not string")?,
)?;
if number <= usize::max_value() as u64 {
Ok(Block {
hash,
timestamp,
number,
})
} else {
Err(format!("Block number {} is larger than a usize", number))
}
.map_err(|e| format!("Failed to get block number: {}", e))
}
/// Returns the value of the `get_deposit_count()` call at the given `address` for the given
/// `block_number`.
///
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
pub async fn get_deposit_count(
&self,
address: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<u64>, String> {
let result = self
.call(address, DEPOSIT_COUNT_FN_SIGNATURE, block_number, timeout)
.await?;
match result {
None => Err("Deposit root response was none".to_string()),
Some(bytes) => {
if bytes.is_empty() {
Ok(None)
} else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES {
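// The `eth_call` result here is ABI-encoded `bytes`: a 32 byte offset word,
// a 32 byte length word, then the 8 byte little-endian count zero-padded to
// 32 bytes, hence the [64..72] slice taken below.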
let mut array = [0; 8];
array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]);
Ok(Some(u64::from_le_bytes(array)))
} else {
Err(format!(
"Deposit count response was not {} bytes: {:?}",
DEPOSIT_COUNT_RESPONSE_BYTES, bytes
))
}
}
}
}
/// Returns the value of the `get_deposit_root()` call at the given `block_number`.
///
/// Assumes that the `address` has the same ABI as the eth2 deposit contract.
pub async fn get_deposit_root(
&self,
address: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<Hash256>, String> {
let result = self
.call(address, DEPOSIT_ROOT_FN_SIGNATURE, block_number, timeout)
.await?;
match result {
None => Err("Deposit root response was none".to_string()),
Some(bytes) => {
if bytes.is_empty() {
Ok(None)
} else if bytes.len() == DEPOSIT_ROOT_BYTES {
Ok(Some(Hash256::from_slice(&bytes)))
} else {
Err(format!(
"Deposit root response was not {} bytes: {:?}",
DEPOSIT_ROOT_BYTES, bytes
))
}
}
}
}
/// Performs an instant, no-transaction call to the contract `address` with the given `0x`-prefixed
/// `hex_data`.
///
/// Returns bytes, if any.
async fn call(
&self,
address: &str,
hex_data: &str,
block_number: u64,
timeout: Duration,
) -> Result<Option<Vec<u8>>, String> {
let params = json! ([
{
"to": address,
"data": hex_data,
},
format!("0x{:x}", block_number)
]);
let response: Option<String> = self
.rpc_request("eth_call", params, timeout)
.await
.map_err(|e| format!("eth_call call failed {:?}", e))?;
response.map(|s| hex_to_bytes(&s)).transpose()
}
/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given
/// `block_height_range`.
///
/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not.
pub async fn get_deposit_logs_in_range(
&self,
address: &str,
block_height_range: Range<u64>,
timeout: Duration,
) -> Result<Vec<Log>, String> {
let params = json! ([{
"address": address,
"topics": [DEPOSIT_EVENT_TOPIC],
"fromBlock": format!("0x{:x}", block_height_range.start),
"toBlock": format!("0x{:x}", block_height_range.end),
}]);
let response: Value = self
.rpc_request("eth_getLogs", params, timeout)
.await
.map_err(|e| format!("eth_getLogs call failed {:?}", e))?;
response
.as_array()
.cloned()
.ok_or("'result' value was not an array")?
.into_iter()
.map(|value| {
let block_number = value
.get("blockNumber")
.ok_or("No block number field in log")?
.as_str()
.ok_or("Block number was not string")?;
let data = value
.get("data")
.ok_or("No block number field in log")?
.as_str()
.ok_or("Data was not string")?;
Ok(Log {
block_number: hex_to_u64_be(block_number)?,
data: hex_to_bytes(data)?,
})
})
.collect::<Result<Vec<Log>, String>>()
.map_err(|e| format!("Failed to get logs in range: {}", e))
}
}
}
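// A hedged end-to-end sketch of the deposit methods above, querying an
// execution node for deposit-contract state. The endpoint URL and contract
// address are placeholders.
#[allow(dead_code)]
async fn deposit_state_sketch() -> Result<(), String> {
    let rpc = HttpJsonRpc::new(SensitiveUrl::parse("http://localhost:8545").unwrap()).unwrap();
    let timeout = Duration::from_secs(1);
    let deposit_contract = "0x00000000219ab540356cbb839cbe05303d7705fa";

    let chain_id = rpc.get_chain_id(timeout).await?;
    let head = rpc
        .get_block(deposit_methods::BlockQuery::Latest, timeout)
        .await?;
    let logs = rpc
        .get_deposit_logs_in_range(deposit_contract, 0..head.number, timeout)
        .await?;
    let count = rpc
        .get_deposit_count(deposit_contract, head.number, timeout)
        .await?;

    println!(
        "chain_id: {:?}, head: {}, deposit logs: {}, deposit count: {:?}",
        chain_id,
        head.number,
        logs.len(),
        count
    );
    Ok(())
}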
pub struct HttpJsonRpc<T = EngineApi> { pub struct HttpJsonRpc<T = EngineApi> {
pub client: Client, pub client: Client,
pub url: SensitiveUrl, pub url: SensitiveUrl,
@ -117,6 +592,12 @@ impl<T> HttpJsonRpc<T> {
} }
} }
impl std::fmt::Display for HttpJsonRpc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}, auth={}", self.url, self.auth.is_some())
}
}
impl HttpJsonRpc<EngineApi> { impl HttpJsonRpc<EngineApi> {
pub async fn upcheck(&self) -> Result<(), Error> { pub async fn upcheck(&self) -> Result<(), Error> {
let result: serde_json::Value = self let result: serde_json::Value = self
@ -289,6 +770,7 @@ impl HttpJsonRpc<BuilderApi> {
Ok(response.into()) Ok(response.into())
} }
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::auth::JwtKey; use super::auth::JwtKey;

View File

@ -6,10 +6,10 @@
use crate::engine_api::Builder; use crate::engine_api::Builder;
use crate::engines::Builders; use crate::engines::Builders;
use auth::{Auth, JwtKey}; use auth::{strip_prefix, Auth, JwtKey};
use engine_api::Error as ApiError; use engine_api::Error as ApiError;
pub use engine_api::*; pub use engine_api::*;
pub use engine_api::{http, http::HttpJsonRpc}; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
pub use engines::ForkChoiceState; pub use engines::ForkChoiceState;
use engines::{Engine, EngineError, Engines, Logging}; use engines::{Engine, EngineError, Engines, Logging};
use lru::LruCache; use lru::LruCache;
@ -42,6 +42,9 @@ mod metrics;
mod payload_status; mod payload_status;
pub mod test_utils; pub mod test_utils;
/// Indicates the default jwt authenticated execution endpoint.
pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/";
/// Name for the default file used for the jwt secret. /// Name for the default file used for the jwt secret.
pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; pub const DEFAULT_JWT_FILE: &str = "jwt.hex";
@ -130,14 +133,6 @@ pub struct Config {
pub default_datadir: PathBuf, pub default_datadir: PathBuf,
} }
fn strip_prefix(s: &str) -> &str {
if let Some(stripped) = s.strip_prefix("0x") {
stripped
} else {
s
}
}
/// Provides access to one or more execution engines and provides a neat interface for consumption /// Provides access to one or more execution engines and provides a neat interface for consumption
/// by the `BeaconChain`. /// by the `BeaconChain`.
/// ///

View File

@ -112,7 +112,7 @@ impl Eth1GenesisService {
"Importing eth1 deposit logs"; "Importing eth1 deposit logs";
); );
let endpoints = eth1_service.init_endpoints(); let endpoints = eth1_service.init_endpoints()?;
loop { loop {
let update_result = eth1_service let update_result = eth1_service

View File

@ -3,6 +3,7 @@ mod eth1_genesis_service;
mod interop; mod interop;
pub use eth1::Config as Eth1Config; pub use eth1::Config as Eth1Config;
pub use eth1::Eth1Endpoint;
pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use eth1_genesis_service::{Eth1GenesisService, Statistics};
pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
pub use types::test_utils::generate_deterministic_keypairs; pub use types::test_utils::generate_deterministic_keypairs;

View File

@ -4,7 +4,7 @@
//! dir in the root of the `lighthouse` repo. //! dir in the root of the `lighthouse` repo.
#![cfg(test)] #![cfg(test)]
use environment::{Environment, EnvironmentBuilder}; use environment::{Environment, EnvironmentBuilder};
use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance};
use genesis::{Eth1Config, Eth1GenesisService}; use genesis::{Eth1Config, Eth1GenesisService};
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
@ -29,7 +29,7 @@ fn basic() {
let mut spec = env.eth2_config().spec.clone(); let mut spec = env.eth2_config().spec.clone();
env.runtime().block_on(async { env.runtime().block_on(async {
let eth1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()) let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into())
.await .await
.expect("should start eth1 environment"); .expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract; let deposit_contract = &eth1.deposit_contract;
@ -44,7 +44,10 @@ fn basic() {
let service = Eth1GenesisService::new( let service = Eth1GenesisService::new(
Eth1Config { Eth1Config {
endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
eth1.endpoint().as_str(),
)
.unwrap()]),
deposit_contract_address: deposit_contract.address(), deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now, deposit_contract_deploy_block: now,
lowest_cached_block_number: now, lowest_cached_block_number: now,

View File

@ -409,45 +409,46 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.arg( .arg(
Arg::with_name("merge") Arg::with_name("merge")
.long("merge") .long("merge")
.help("Enable the features necessary to run merge testnets. This feature \ .help("Deprecated. The feature activates automatically when --execution-endpoint \
is unstable and is for developers only.") is supplied.")
.takes_value(false), .takes_value(false)
) )
.arg( .arg(
Arg::with_name("execution-endpoints") Arg::with_name("execution-endpoint")
.long("execution-endpoints") .long("execution-endpoint")
.value_name("EXECUTION-ENDPOINTS") .value_name("EXECUTION-ENDPOINT")
.help("One or more comma-delimited server endpoints for HTTP JSON-RPC connection. \ .alias("execution-endpoints")
If multiple endpoints are given the endpoints are used as fallback in the \ .help("Server endpoint for an execution layer jwt authenticated HTTP \
given order. Also enables the --merge flag. \ JSON-RPC connection. Uses the same endpoint to populate the \
If this flag is omitted and the --eth1-endpoints is supplied, those values \ deposit cache. Also enables the --merge flag.\
will be used. Defaults to http://127.0.0.1:8545.") If not provided, uses the default value of http://127.0.0.1:8551")
.takes_value(true)
.requires("execution-jwt")
)
.arg(
Arg::with_name("execution-jwt")
.long("execution-jwt")
.value_name("EXECUTION-JWT")
.alias("jwt-secrets")
.help("File path which contains the hex-encoded JWT secret for the \
execution endpoint provided in the --execution-endpoint flag.")
.takes_value(true) .takes_value(true)
) )
.arg( .arg(
Arg::with_name("jwt-secrets") Arg::with_name("execution-jwt-id")
.long("jwt-secrets") .long("execution-jwt-id")
.value_name("JWT-SECRETS") .value_name("EXECUTION-JWT-ID")
.help("One or more comma-delimited file paths which contain the corresponding hex-encoded \ .alias("jwt-id")
JWT secrets for each execution endpoint provided in the --execution-endpoints flag. \
The number of paths should be in the same order and strictly equal to the number \
of execution endpoints provided.")
.takes_value(true)
.requires("execution-endpoints")
)
.arg(
Arg::with_name("jwt-id")
.long("jwt-id")
.value_name("JWT-ID")
.help("Used by the beacon node to communicate a unique identifier to execution nodes \ .help("Used by the beacon node to communicate a unique identifier to execution nodes \
during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\
Set to empty by default") Set to empty by default")
.takes_value(true) .takes_value(true)
) )
.arg( .arg(
Arg::with_name("jwt-version") Arg::with_name("execution-jwt-version")
.long("jwt-version") .long("execution-jwt-version")
.value_name("JWT-VERSION") .value_name("EXECUTION-JWT-VERSION")
.alias("jwt-version")
.help("Used by the beacon node to communicate a client version to execution nodes \ .help("Used by the beacon node to communicate a client version to execution nodes \
during JWT authentication. It corresponds to the 'clv' field in the JWT claims object.\ during JWT authentication. It corresponds to the 'clv' field in the JWT claims object.\
Set to empty by default") Set to empty by default")
@ -461,14 +462,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
collected from any blocks produced by this node. Defaults to a junk \ collected from any blocks produced by this node. Defaults to a junk \
address whilst the merge is in development stages. THE DEFAULT VALUE \ address whilst the merge is in development stages. THE DEFAULT VALUE \
WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION")
.requires("merge") .requires("execution-endpoint")
.takes_value(true) .takes_value(true)
) )
.arg( .arg(
Arg::with_name("payload-builders") Arg::with_name("payload-builder")
.long("payload-builders") .long("payload-builder")
.alias("payload-builders")
.help("The URL of a service compatible with the MEV-boost API.") .help("The URL of a service compatible with the MEV-boost API.")
.requires("merge") .requires("execution-endpoint")
.takes_value(true) .takes_value(true)
) )

View File

@ -3,12 +3,14 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG;
use client::{ClientConfig, ClientGenesis}; use client::{ClientConfig, ClientGenesis};
use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR};
use environment::RuntimeContext; use environment::RuntimeContext;
use genesis::Eth1Endpoint;
use http_api::TlsConfig; use http_api::TlsConfig;
use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized};
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use slog::{info, warn, Logger}; use slog::{info, warn, Logger};
use std::cmp; use std::cmp;
use std::cmp::max; use std::cmp::max;
use std::fmt::Debug;
use std::fs; use std::fs;
use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -215,15 +217,18 @@ pub fn get_config<E: EthSpec>(
"msg" => "please use --eth1-endpoints instead" "msg" => "please use --eth1-endpoints instead"
); );
client_config.sync_eth1_chain = true; client_config.sync_eth1_chain = true;
client_config.eth1.endpoints = vec![SensitiveUrl::parse(endpoint)
let endpoints = vec![SensitiveUrl::parse(endpoint)
.map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?];
client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints);
} else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") {
client_config.sync_eth1_chain = true; client_config.sync_eth1_chain = true;
client_config.eth1.endpoints = endpoints let endpoints = endpoints
.split(',') .split(',')
.map(SensitiveUrl::parse) .map(SensitiveUrl::parse)
.collect::<Result<_, _>>() .collect::<Result<_, _>>()
.map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?;
client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints);
} }
if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") {
@ -242,47 +247,79 @@ pub fn get_config<E: EthSpec>(
client_config.eth1.cache_follow_distance = Some(follow_distance); client_config.eth1.cache_follow_distance = Some(follow_distance);
} }
if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { if cli_args.is_present("merge") {
if cli_args.is_present("execution-endpoint") {
warn!(
log,
"The --merge flag is deprecated";
"info" => "the --execution-endpoint flag automatically enables this feature"
)
} else {
return Err("The --merge flag is deprecated. \
Supply a value to --execution-endpoint instead."
.into());
}
}
if let Some(endpoints) = cli_args.value_of("execution-endpoint") {
let mut el_config = execution_layer::Config::default(); let mut el_config = execution_layer::Config::default();
if let Some(endpoints) = cli_args.value_of("execution-endpoints") { // Always follow the deposit contract when there is an execution endpoint.
//
// This is wasteful for non-staking nodes as they have no need to process deposit contract
// logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or
// `--staking` flags, however that poses a risk to stakers since they cannot produce blocks
// without "eth1".
//
// The waste for non-staking nodes is relatively small so we err on the side of safety for
// stakers. The merge is already complicated enough.
client_config.sync_eth1_chain = true; client_config.sync_eth1_chain = true;
el_config.execution_endpoints = endpoints
.split(',') // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied.
.map(SensitiveUrl::parse) let execution_endpoint =
.collect::<Result<_, _>>() parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?;
.map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?;
} else if cli_args.is_present("merge") { // Parse a single JWT secret, logging warnings if multiple are supplied.
el_config.execution_endpoints = client_config.eth1.endpoints.clone(); //
} // JWTs are required if `--execution-endpoint` is supplied.
let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?;
if let Some(endpoints) = cli_args.value_of("payload-builders") { let secret_file =
el_config.builder_endpoints = endpoints parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?;
.split(',')
.map(SensitiveUrl::parse) // Parse and set the payload builder, if any.
.collect::<Result<_, _>>() if let Some(endpoints) = cli_args.value_of("payload-builder") {
.map_err(|e| format!("payload-builders contains an invalid URL {:?}", e))?; let payload_builder =
} parse_only_one_value(endpoints, SensitiveUrl::parse, "--payload-builder", log)?;
el_config.builder_endpoints = vec![payload_builder];
if let Some(secrets) = cli_args.value_of("jwt-secrets") {
let secret_files: Vec<_> = secrets.split(',').map(PathBuf::from).collect();
if !secret_files.is_empty() && secret_files.len() != el_config.execution_endpoints.len()
{
return Err(format!(
"{} execution-endpoints supplied with {} jwt-secrets. Lengths \
must match or jwt-secrets must be empty.",
el_config.execution_endpoints.len(),
secret_files.len(),
));
}
el_config.secret_files = secret_files;
} }
// Set config values from parse values.
el_config.secret_files = vec![secret_file.clone()];
el_config.execution_endpoints = vec![execution_endpoint.clone()];
el_config.suggested_fee_recipient = el_config.suggested_fee_recipient =
clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?;
el_config.jwt_id = clap_utils::parse_optional(cli_args, "jwt-id")?; el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?;
el_config.jwt_version = clap_utils::parse_optional(cli_args, "jwt-version")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?;
el_config.default_datadir = client_config.data_dir.clone(); el_config.default_datadir = client_config.data_dir.clone();
// If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
// use `--execution-endpoint` instead. Also, log a deprecation warning.
if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") {
warn!(
log,
"Ignoring --eth1-endpoints flag";
"info" => "the value for --execution-endpoint will be used instead. \
--eth1-endpoints has been deprecated for post-merge configurations"
);
}
client_config.eth1.endpoints = Eth1Endpoint::Auth {
endpoint: execution_endpoint,
jwt_path: secret_file,
jwt_id: el_config.jwt_id.clone(),
jwt_version: el_config.jwt_version.clone(),
};
// Store the EL config in the client config.
client_config.execution_layer = Some(el_config); client_config.execution_layer = Some(el_config);
} }
@ -344,7 +381,6 @@ pub fn get_config<E: EthSpec>(
client_config.eth1.follow_distance = spec.eth1_follow_distance; client_config.eth1.follow_distance = spec.eth1_follow_distance;
client_config.eth1.node_far_behind_seconds = client_config.eth1.node_far_behind_seconds =
max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block; max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block;
client_config.eth1.network_id = spec.deposit_network_id.into();
client_config.eth1.chain_id = spec.deposit_chain_id.into(); client_config.eth1.chain_id = spec.deposit_chain_id.into();
client_config.eth1.set_block_cache_truncation::<E>(spec); client_config.eth1.set_block_cache_truncation::<E>(spec);
@ -844,3 +880,38 @@ pub fn get_slots_per_restore_point<E: EthSpec>(
Ok((default, false)) Ok((default, false))
} }
} }
/// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`.
///
/// If there is more than one value, log a warning. If there are no values, return an error.
pub fn parse_only_one_value<F, T, E>(
cli_value: &str,
parser: F,
flag_name: &str,
log: &Logger,
) -> Result<T, String>
where
F: Fn(&str) -> Result<T, E>,
E: Debug,
{
let values = cli_value
.split(',')
.map(parser)
.collect::<Result<Vec<_>, _>>()
.map_err(|e| format!("{} contains an invalid value {:?}", flag_name, e))?;
if values.len() > 1 {
warn!(
log,
"Multiple values provided";
"info" => "multiple values are deprecated, only the first value will be used",
"count" => values.len(),
"flag" => flag_name
);
}
values
.into_iter()
.next()
.ok_or(format!("Must provide at least one value to {}", flag_name))
}
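// A hedged sketch of the helper above: given a comma-separated list, a warning
// is logged and only the first successfully parsed value is kept.
#[allow(dead_code)]
fn example_single_execution_endpoint(log: &Logger) -> Result<SensitiveUrl, String> {
    parse_only_one_value(
        "http://localhost:8551,http://localhost:8552",
        SensitiveUrl::parse,
        "--execution-endpoint",
        log,
    )
}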

View File

@ -1,7 +1,7 @@
use clap::ArgMatches; use clap::ArgMatches;
use environment::Environment; use environment::Environment;
use eth2_network_config::Eth2NetworkConfig; use eth2_network_config::Eth2NetworkConfig;
use genesis::{Eth1Config, Eth1GenesisService}; use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService};
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
use ssz::Encode; use ssz::Encode;
use std::cmp::max; use std::cmp::max;
@ -35,11 +35,12 @@ pub fn run<T: EthSpec>(
let mut config = Eth1Config::default(); let mut config = Eth1Config::default();
if let Some(v) = endpoints.clone() { if let Some(v) = endpoints.clone() {
config.endpoints = v let endpoints = v
.iter() .iter()
.map(|s| SensitiveUrl::parse(s)) .map(|s| SensitiveUrl::parse(s))
.collect::<Result<_, _>>() .collect::<Result<_, _>>()
.map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?;
config.endpoints = Eth1Endpoint::NoAuth(endpoints);
} }
config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address);
config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block;

View File

@ -55,6 +55,7 @@ validator_dir = { path = "../common/validator_dir" }
slashing_protection = { path = "../validator_client/slashing_protection" } slashing_protection = { path = "../validator_client/slashing_protection" }
lighthouse_network = { path = "../beacon_node/lighthouse_network" } lighthouse_network = { path = "../beacon_node/lighthouse_network" }
sensitive_url = { path = "../common/sensitive_url" } sensitive_url = { path = "../common/sensitive_url" }
eth1 = { path = "../beacon_node/eth1" }
[[test]] [[test]]
name = "lighthouse_tests" name = "lighthouse_tests"

View File

@ -1,6 +1,7 @@
use beacon_node::ClientConfig as Config; use beacon_node::ClientConfig as Config;
use crate::exec::{CommandLineTestExec, CompletedTest}; use crate::exec::{CommandLineTestExec, CompletedTest};
use eth1::Eth1Endpoint;
use lighthouse_network::PeerId; use lighthouse_network::PeerId;
use std::fs::File; use std::fs::File;
use std::io::Write; use std::io::Write;
@ -66,7 +67,10 @@ fn staking_flag() {
.with_config(|config| { .with_config(|config| {
assert!(config.http_api.enabled); assert!(config.http_api.enabled);
assert!(config.sync_eth1_chain); assert!(config.sync_eth1_chain);
assert_eq!(config.eth1.endpoints[0].to_string(), DEFAULT_ETH1_ENDPOINT); assert_eq!(
config.eth1.endpoints.get_endpoints()[0].to_string(),
DEFAULT_ETH1_ENDPOINT
);
}); });
} }
@ -196,18 +200,21 @@ fn eth1_endpoints_flag() {
.run_with_zero_port() .run_with_zero_port()
.with_config(|config| { .with_config(|config| {
assert_eq!( assert_eq!(
config.eth1.endpoints[0].full.to_string(), config.eth1.endpoints.get_endpoints()[0].full.to_string(),
"http://localhost:9545/" "http://localhost:9545/"
); );
assert_eq!( assert_eq!(
config.eth1.endpoints[0].to_string(), config.eth1.endpoints.get_endpoints()[0].to_string(),
"http://localhost:9545/" "http://localhost:9545/"
); );
assert_eq!( assert_eq!(
config.eth1.endpoints[1].full.to_string(), config.eth1.endpoints.get_endpoints()[1].full.to_string(),
"https://infura.io/secret" "https://infura.io/secret"
); );
assert_eq!(config.eth1.endpoints[1].to_string(), "https://infura.io/"); assert_eq!(
config.eth1.endpoints.get_endpoints()[1].to_string(),
"https://infura.io/"
);
assert!(config.sync_eth1_chain); assert!(config.sync_eth1_chain);
}); });
} }
@ -246,45 +253,107 @@ fn eth1_cache_follow_distance_manual() {
} }
// Tests for Bellatrix flags. // Tests for Bellatrix flags.
#[test] fn run_merge_execution_endpoints_flag_test(flag: &str) {
fn merge_flag() {
CommandLineTest::new()
.flag("merge", None)
.run_with_zero_port()
.with_config(|config| assert!(config.execution_layer.is_some()));
}
#[test]
fn merge_execution_endpoints_flag() {
use sensitive_url::SensitiveUrl; use sensitive_url::SensitiveUrl;
let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"];
let endpoints = urls // we don't support redundancy for execution-endpoints
.iter() // only the first provided endpoint is parsed.
.map(|s| SensitiveUrl::parse(s).unwrap())
.collect::<Vec<_>>();
let mut endpoint_arg = urls[0].to_string(); let mut endpoint_arg = urls[0].to_string();
for url in urls.into_iter().skip(1) { for url in urls.iter().skip(1) {
endpoint_arg.push(','); endpoint_arg.push(',');
endpoint_arg.push_str(url); endpoint_arg.push_str(url);
} }
let (_dirs, jwts): (Vec<_>, Vec<_>) = (0..2)
.map(|i| {
let dir = TempDir::new().expect("Unable to create temporary directory");
let path = dir.path().join(format!("jwt-{}", i));
(dir, path)
})
.unzip();
let mut jwts_arg = jwts[0].as_os_str().to_str().unwrap().to_string();
for jwt in jwts.iter().skip(1) {
jwts_arg.push(',');
jwts_arg.push_str(jwt.as_os_str().to_str().unwrap());
}
// this is way better but intersperse is still a nightly feature :/ // this is way better but intersperse is still a nightly feature :/
// let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); // let endpoint_arg: String = urls.into_iter().intersperse(",").collect();
CommandLineTest::new() CommandLineTest::new()
.flag("merge", None) .flag(flag, Some(&endpoint_arg))
.flag("execution-endpoints", Some(&endpoint_arg)) .flag("execution-jwt", Some(&jwts_arg))
.run_with_zero_port() .run_with_zero_port()
.with_config(|config| { .with_config(|config| {
let config = config.execution_layer.as_ref().unwrap(); let config = config.execution_layer.as_ref().unwrap();
assert_eq!(config.execution_endpoints, endpoints) assert_eq!(config.execution_endpoints.len(), 1);
assert_eq!(
config.execution_endpoints[0],
SensitiveUrl::parse(&urls[0]).unwrap()
);
// Only the first secret file should be used.
assert_eq!(config.secret_files, vec![jwts[0].clone()]);
}); });
} }
#[test] #[test]
fn merge_execution_endpoints_flag() {
run_merge_execution_endpoints_flag_test("execution-endpoints")
}
#[test]
fn merge_execution_endpoint_flag() {
run_merge_execution_endpoints_flag_test("execution-endpoint")
}
fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) {
use sensitive_url::SensitiveUrl;
let eth1_endpoint = "http://bad.bad";
let execution_endpoint = "http://good.good";
assert!(eth1_endpoint != execution_endpoint);
let dir = TempDir::new().expect("Unable to create temporary directory");
let jwt_path = dir.path().join("jwt-file");
CommandLineTest::new()
.flag(eth1_flag, Some(&eth1_endpoint))
.flag(execution_flag, Some(&execution_endpoint))
.flag("execution-jwt", jwt_path.as_os_str().to_str())
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.execution_layer.as_ref().unwrap().execution_endpoints,
vec![SensitiveUrl::parse(execution_endpoint).unwrap()]
);
// The eth1 endpoint should have been set to the --execution-endpoint value in defiance
// of --eth1-endpoints.
assert_eq!(
config.eth1.endpoints,
Eth1Endpoint::Auth {
endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(),
jwt_path: jwt_path.clone(),
jwt_id: None,
jwt_version: None,
}
);
});
}
#[test]
fn execution_endpoints_overrides_eth1_endpoints() {
run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints");
}
#[test]
fn execution_endpoint_overrides_eth1_endpoint() {
run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint");
}
#[test]
fn merge_jwt_secrets_flag() { fn merge_jwt_secrets_flag() {
let dir = TempDir::new().expect("Unable to create temporary directory"); let dir = TempDir::new().expect("Unable to create temporary directory");
let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file");
file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33")
.expect("Unable to write to file"); .expect("Unable to write to file");
CommandLineTest::new() CommandLineTest::new()
.flag("merge", None)
.flag("execution-endpoints", Some("http://localhost:8551/")) .flag("execution-endpoints", Some("http://localhost:8551/"))
.flag( .flag(
"jwt-secrets", "jwt-secrets",
@ -302,8 +371,13 @@ fn merge_jwt_secrets_flag() {
} }
#[test] #[test]
fn merge_fee_recipient_flag() { fn merge_fee_recipient_flag() {
let dir = TempDir::new().expect("Unable to create temporary directory");
CommandLineTest::new() CommandLineTest::new()
.flag("merge", None) .flag("execution-endpoint", Some("http://meow.cats"))
.flag(
"execution-jwt",
dir.path().join("jwt-file").as_os_str().to_str(),
)
.flag( .flag(
"suggested-fee-recipient", "suggested-fee-recipient",
Some("0x00000000219ab540356cbb839cbe05303d7705fa"), Some("0x00000000219ab540356cbb839cbe05303d7705fa"),
@ -317,19 +391,74 @@ fn merge_fee_recipient_flag() {
); );
}); });
} }
#[test] fn run_payload_builder_flag_test(flag: &str, builders: &str) {
fn jwt_optional_flags() { use sensitive_url::SensitiveUrl;
let dir = TempDir::new().expect("Unable to create temporary directory");
let all_builders: Vec<_> = builders
.split(",")
.map(|builder| SensitiveUrl::parse(builder).expect("valid builder url"))
.collect();
CommandLineTest::new() CommandLineTest::new()
.flag("merge", None) .flag("execution-endpoint", Some("http://meow.cats"))
.flag("jwt-id", Some("bn-1")) .flag(
.flag("jwt-version", Some("Lighthouse-v2.1.3")) "execution-jwt",
dir.path().join("jwt-file").as_os_str().to_str(),
)
.flag(flag, Some(builders))
.run_with_zero_port() .run_with_zero_port()
.with_config(|config| { .with_config(|config| {
let config = config.execution_layer.as_ref().unwrap(); let config = config.execution_layer.as_ref().unwrap();
assert_eq!(config.jwt_id, Some("bn-1".to_string())); // Only first provided endpoint is parsed as we don't support
assert_eq!(config.jwt_version, Some("Lighthouse-v2.1.3".to_string())); // redundancy.
assert_eq!(&config.builder_endpoints, &all_builders[..1]);
}); });
} }
#[test]
fn payload_builder_flags() {
run_payload_builder_flag_test("payload-builder", "http://meow.cats");
run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs");
run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs");
}
fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) {
use sensitive_url::SensitiveUrl;
let dir = TempDir::new().expect("Unable to create temporary directory");
let execution_endpoint = "http://meow.cats";
let jwt_file = "jwt-file";
let id = "bn-1";
let version = "Lighthouse-v2.1.3";
CommandLineTest::new()
.flag("execution-endpoint", Some(execution_endpoint.clone()))
.flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str())
.flag(jwt_id_flag, Some(id))
.flag(jwt_version_flag, Some(version))
.run_with_zero_port()
.with_config(|config| {
let el_config = config.execution_layer.as_ref().unwrap();
assert_eq!(el_config.jwt_id, Some(id.to_string()));
assert_eq!(el_config.jwt_version, Some(version.to_string()));
assert_eq!(
config.eth1.endpoints,
Eth1Endpoint::Auth {
endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(),
jwt_path: dir.path().join(jwt_file),
jwt_id: Some(id.to_string()),
jwt_version: Some(version.to_string()),
}
);
});
}
#[test]
fn jwt_optional_flags() {
run_jwt_optional_flags_test("execution-jwt", "execution-jwt-id", "execution-jwt-version");
}
#[test]
fn jwt_optional_alias_flags() {
run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version");
}
#[test] #[test]
fn terminal_total_difficulty_override_flag() { fn terminal_total_difficulty_override_flag() {
use beacon_node::beacon_chain::types::Uint256; use beacon_node::beacon_chain::types::Uint256;

View File

@ -11,5 +11,4 @@ exec ganache \
--mnemonic "$ETH1_NETWORK_MNEMONIC" \ --mnemonic "$ETH1_NETWORK_MNEMONIC" \
--port 8545 \ --port 8545 \
--blockTime $SECONDS_PER_ETH1_BLOCK \ --blockTime $SECONDS_PER_ETH1_BLOCK \
--networkId "$NETWORK_ID" \ --chain.chainId "$CHAIN_ID"
--chain.chainId "$NETWORK_ID"

View File

@ -32,7 +32,7 @@ lcli \
--genesis-delay $GENESIS_DELAY \ --genesis-delay $GENESIS_DELAY \
--genesis-fork-version $GENESIS_FORK_VERSION \ --genesis-fork-version $GENESIS_FORK_VERSION \
--altair-fork-epoch $ALTAIR_FORK_EPOCH \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \
--eth1-id $NETWORK_ID \ --eth1-id $CHAIN_ID \
--eth1-follow-distance 1 \ --eth1-follow-distance 1 \
--seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-slot $SECONDS_PER_SLOT \
--seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \

View File

@ -30,7 +30,7 @@ GENESIS_DELAY=0
BOOTNODE_PORT=4242 BOOTNODE_PORT=4242
# Network ID and Chain ID of local eth1 test network # Network ID and Chain ID of local eth1 test network
NETWORK_ID=4242 CHAIN_ID=4242
# Hard fork configuration # Hard fork configuration
ALTAIR_FORK_EPOCH=18446744073709551615 ALTAIR_FORK_EPOCH=18446744073709551615


@@ -30,7 +30,7 @@ GENESIS_DELAY=0
 BOOTNODE_PORT=4242
 
 # Network ID and Chain ID of local eth1 test network
-NETWORK_ID=4242
+CHAIN_ID=4242
 
 # Hard fork configuration
 ALTAIR_FORK_EPOCH=18446744073709551615


@@ -16,17 +16,11 @@ pub struct GanacheInstance {
     pub port: u16,
     child: Child,
     pub web3: Web3<Http>,
-    network_id: u64,
     chain_id: u64,
 }
 
 impl GanacheInstance {
-    fn new_from_child(
-        mut child: Child,
-        port: u16,
-        network_id: u64,
-        chain_id: u64,
-    ) -> Result<Self, String> {
+    fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result<Self, String> {
         let stdout = child
             .stdout
             .ok_or("Unable to get stdout for ganache child process")?;
@@ -64,14 +58,13 @@ impl GanacheInstance {
             port,
             child,
             web3,
-            network_id,
             chain_id,
         })
     }
 
     /// Start a new `ganache` process, waiting until it indicates that it is ready to accept
     /// RPC connections.
-    pub fn new(network_id: u64, chain_id: u64) -> Result<Self, String> {
+    pub fn new(chain_id: u64) -> Result<Self, String> {
         let port = unused_tcp_port()?;
         let binary = match cfg!(windows) {
             true => "ganache.cmd",
@@ -89,8 +82,6 @@ impl GanacheInstance {
             .arg(format!("{}", port))
             .arg("--mnemonic")
             .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"")
-            .arg("--networkId")
-            .arg(format!("{}", network_id))
             .arg("--chain.chainId")
             .arg(format!("{}", chain_id))
             .spawn()
@@ -102,7 +93,7 @@ impl GanacheInstance {
             )
         })?;
 
-        Self::new_from_child(child, port, network_id, chain_id)
+        Self::new_from_child(child, port, chain_id)
     }
 
     pub fn fork(&self) -> Result<Self, String> {
@@ -128,7 +119,7 @@ impl GanacheInstance {
             )
         })?;
 
-        Self::new_from_child(child, port, self.network_id, self.chain_id)
+        Self::new_from_child(child, port, self.chain_id)
     }
 
     /// Returns the endpoint that this instance is listening on.
@@ -136,11 +127,6 @@ impl GanacheInstance {
         endpoint(self.port)
     }
 
-    /// Returns the network id of the ganache instance
-    pub fn network_id(&self) -> u64 {
-        self.network_id
-    }
-
     /// Returns the chain id of the ganache instance
     pub fn chain_id(&self) -> u64 {
         self.chain_id
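With `network_id` gone, the test rig is driven by a chain id alone. Below is a minimal usage sketch of the new constructor, assuming `GanacheInstance` is reachable from the `eth1_test_rig` crate root and that a `ganache` binary is on the `PATH`; the chain id value is arbitrary.

```rust
// Hypothetical smoke test for the single-argument constructor above.
fn main() -> Result<(), String> {
    // Spawn a local ganache instance on an unused port with chain id 4242.
    let ganache = eth1_test_rig::GanacheInstance::new(4242)?;
    assert_eq!(ganache.chain_id(), 4242);
    // `endpoint()` is assumed to return the HTTP URL the instance listens on.
    println!("ganache listening on {}", ganache.endpoint());
    Ok(())
}
```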


@@ -30,8 +30,8 @@ pub struct GanacheEth1Instance {
 }
 
 impl GanacheEth1Instance {
-    pub async fn new(network_id: u64, chain_id: u64) -> Result<Self, String> {
-        let ganache = GanacheInstance::new(network_id, chain_id)?;
+    pub async fn new(chain_id: u64) -> Result<Self, String> {
+        let ganache = GanacheInstance::new(chain_id)?;
         DepositContract::deploy(ganache.web3.clone(), 0, None)
             .await
             .map(|deposit_contract| Self {


@@ -9,6 +9,7 @@ edition = "2021"
 [dependencies]
 node_test_rig = { path = "../node_test_rig" }
 eth1 = {path = "../../beacon_node/eth1"}
+execution_layer = {path = "../../beacon_node/execution_layer"}
 types = { path = "../../consensus/types" }
 parking_lot = "0.12.0"
 futures = "0.3.7"


@@ -1,9 +1,10 @@
 use crate::local_network::INVALID_ADDRESS;
 use crate::{checks, LocalNetwork, E};
 use clap::ArgMatches;
-use eth1::http::Eth1Id;
-use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID};
+use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
 use eth1_test_rig::GanacheEth1Instance;
+use execution_layer::http::deposit_methods::Eth1Id;
 use futures::prelude::*;
 use node_test_rig::{
     environment::{EnvironmentBuilder, LoggerConfig},
@@ -92,10 +93,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
      * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
      * validators.
      */
-    let ganache_eth1_instance =
-        GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await?;
+    let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
     let deposit_contract = ganache_eth1_instance.deposit_contract;
-    let network_id = ganache_eth1_instance.ganache.network_id();
     let chain_id = ganache_eth1_instance.ganache.chain_id();
     let ganache = ganache_eth1_instance.ganache;
     let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str())
@@ -124,7 +123,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     let mut beacon_config = testing_client_config();
 
     beacon_config.genesis = ClientGenesis::DepositContract;
-    beacon_config.eth1.endpoints = vec![eth1_endpoint];
+    beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]);
     beacon_config.eth1.deposit_contract_address = deposit_contract_address;
     beacon_config.eth1.deposit_contract_deploy_block = 0;
     beacon_config.eth1.lowest_cached_block_number = 0;
@@ -133,7 +132,6 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     beacon_config.dummy_eth1_backend = false;
     beacon_config.sync_eth1_chain = true;
     beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
-    beacon_config.eth1.network_id = Eth1Id::from(network_id);
     beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
 
     beacon_config.network.target_peers = node_count - 1;
@@ -150,10 +148,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
         for i in 0..node_count - 1 {
             let mut config = beacon_config.clone();
             if i % 2 == 0 {
-                config.eth1.endpoints.insert(
-                    0,
-                    SensitiveUrl::parse(INVALID_ADDRESS).expect("Unable to parse invalid address"),
-                );
+                if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints {
+                    endpoints.insert(
+                        0,
+                        SensitiveUrl::parse(INVALID_ADDRESS)
+                            .expect("Unable to parse invalid address"),
+                    )
+                }
             }
             network.add_beacon_node(config).await?;
         }
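To tie the simulator change back to the two modes described in the commit message, a config in the new world carries either a list of plain URLs or a single authenticated endpoint. The sketch below shows both, using the `Eth1Endpoint` shape sketched earlier; the URLs and the JWT path are placeholders, not values from this PR.

```rust
use std::path::PathBuf;

use sensitive_url::SensitiveUrl;

// Hypothetical constructors for the two supported modes; only the variant
// shapes are taken from the diffs above.
fn pre_merge_endpoints() -> Eth1Endpoint {
    // Multiple unauthenticated eth1 endpoints for deposit processing.
    Eth1Endpoint::NoAuth(vec![
        SensitiveUrl::parse("http://localhost:8545").expect("valid URL"),
        SensitiveUrl::parse("http://localhost:8546").expect("valid URL"),
    ])
}

fn post_merge_endpoint() -> Eth1Endpoint {
    // A single JWT-authenticated execution endpoint, shared between the
    // execution layer and deposit processing.
    Eth1Endpoint::Auth {
        endpoint: SensitiveUrl::parse("http://localhost:8551").expect("valid URL"),
        jwt_path: PathBuf::from("/path/to/jwt.hex"),
        jwt_id: None,
        jwt_version: None,
    }
}
```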