Validator client refactor (#618)

* Update to spec v0.9.0

* Update to v0.9.1

* Bump spec tags for v0.9.1

* Formatting, fix CI failures

* Resolve accidental KeyPair merge conflict

* Document new BeaconState functions

* Add `validator` changes from `validator-to-rest`

* Add initial (failing) REST api tests

* Fix signature parsing

* Add more tests

* Refactor http router

* Add working tests for publish beacon block

* Add validator duties tests

* Move account_manager under `lighthouse` binary

* Unify logfile handling in `environment` crate.

* Fix incorrect cache drops in `advance_caches`

* Update fork choice for v0.9.1

* Add `deposit_contract` crate

* Add progress on validator onboarding

* Add unfinished attestation code

* Update account manager CLI

* Write eth1 data file as hex string

* Integrate ValidatorDirectory with validator_client

* Move ValidatorDirectory into validator_client

* Clean up some FIXMEs

* Add beacon_chain_sim

* Fix a few docs/logs

* Expand `beacon_chain_sim`

* Fix spec for `beacon_chain_sim`

* More testing for api

* Start work on attestation endpoint

* Reject empty attestations

* Allow attestations to genesis block

* Add working tests for `rest_api` validator endpoint

* Remove grpc from beacon_node

* Start heavy refactor of validator client

- Block production is working

* Prune old validator client files

* Start work on attestation service

* Add attestation service to validator client

* Use full pubkey for validator directories

* Add validator duties post endpoint

* Use par_iter for keypair generation

* Use bulk duties request in validator client

* Add version http endpoint tests

* Add interop keys and startup wait

* Ensure a prompt exit

* Add duties pruning

* Fix compile error in beacon node tests

* Add github workflow

* Modify rust.yaml

* Modify gitlab actions

* Add to CI file

* Add sudo to CI npm install

* Move cargo fmt to own job in tests

* Fix cargo fmt in CI

* Add rustup update before cargo fmt

* Change name of CI job

* Make other CI jobs require cargo fmt

* Add CI badge

* Remove gitlab and travis files

* Add different http timeout for debug

* Update docker file, use makefile in CI

* Use make in the dockerfile, skip the test

* Use the makefile for debug CI test

* Update book

* Tidy grpc and misc things

* Apply discv5 fixes

* Address other minor issues

* Fix warnings

* Attempt fix for addr parsing

* Tidy validator config, CLIs

* Tidy comments

* Tidy signing, reduce ForkService duplication

* Fail if skipping too many slots

* Set default recent genesis time to 0

* Add custom http timeout to validator

* Fix compile bug in node_test_rig

* Remove old bootstrap flag from val CLI

* Update docs

* Tidy val client

* Change val client log levels

* Add comments, more validity checks

* Fix compile error, add comments

* Undo changes to eth2-libp2p/src

* Reduce duplication of keypair generation

* Add more logging for validator duties

* Fix beacon_chain_sim, nitpicks

* Fix compile error, minor nits

* Address Michael's comments
Paul Hauner 2019-11-25 15:48:24 +11:00 committed by GitHub
parent 3ca63cfa83
commit 78d82d9193
100 changed files with 4571 additions and 4032 deletions

.dockerignore (new file, +2 lines)

@ -0,0 +1,2 @@
tests/ef_tests/eth2.0-spec-tests
target/

.github/workflows/test-suite.yml (new file, +45 lines)

@ -0,0 +1,45 @@
name: test-suite
on: [push]
jobs:
cargo-fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Get latest version of stable Rust
run: rustup update stable
- name: Check formatting with cargo fmt
run: make cargo-fmt
release-tests-ubuntu:
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Install ganache-cli
run: sudo npm install -g ganache-cli
- name: Run tests in release
run: make test-release
debug-tests-ubuntu:
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Install ganache-cli
run: sudo npm install -g ganache-cli
- name: Run tests in debug
run: make test-debug
ef-tests-ubuntu:
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Run eth2.0-spec-tests with and without fake_crypto
run: make test-ef
dockerfile-ubuntu:
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- name: Build the root Dockerfile
run: docker build .


@ -1,60 +0,0 @@
#Adapted from https://users.rust-lang.org/t/my-gitlab-config-docs-tests/16396
default:
image: 'sigp/lighthouse:latest'
cache:
paths:
- tests/ef_tests/*-v0.9.1.tar.gz
stages:
- test
- document
variables:
CARGO_HOME: /cache/cargocache
check-fmt:
stage: test
script:
- cargo build --manifest-path protos/Cargo.toml
- cargo fmt --all -- --check
test-dev:
stage: test
variables:
GIT_SUBMODULE_STRATEGY: normal
script:
- cargo test --verbose --all
test-release:
stage: test
variables:
GIT_SUBMODULE_STRATEGY: normal
script:
- cargo test --verbose --all --release
test-ef:
stage: test
variables:
GIT_SUBMODULE_STRATEGY: normal
script:
- make make-ef-tests
- cargo test --manifest-path tests/ef_tests/Cargo.toml --release --features ef_tests
test-ef-fake-crypto:
stage: test
variables:
GIT_SUBMODULE_STRATEGY: normal
script:
- make make-ef-tests
- cargo test --manifest-path tests/ef_tests/Cargo.toml --release --features ef_tests,fake_crypto
documentation:
stage: document
script:
- cargo doc --no-deps
- aws s3 sync target/doc/ s3://lighthouse-docs.sigmaprime.io/ --exclude '.lock' --delete
# Configure the below when we want to have a default page (and update S3 bucket index).
# - echo '<meta http-equiv="refresh" content="0; url={{ LIBRARY NAME }}">' > public/index.html
only:
- master


@ -1,24 +0,0 @@
language: rust
cache:
directories:
- /home/travis/.cargo
before_install:
- curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip
- unzip protoc-3.4.0-linux-x86_64.zip -d protoc3
- sudo mv protoc3/bin/* /usr/local/bin/
- sudo mv protoc3/include/* /usr/local/include/
- sudo chown $USER /usr/local/bin/protoc
- sudo chown -R $USER /usr/local/include/google
script:
- cargo build --verbose --all --release
- cargo fmt --all -- --check
rust:
- beta
- nightly
matrix:
allow_failures:
- rust: beta
- rust: nightly
fast_finish: true
install:
- rustup component add rustfmt


@ -7,6 +7,7 @@ members = [
"eth2/utils/bls",
"eth2/utils/compare_fields",
"eth2/utils/compare_fields_derive",
"eth2/utils/deposit_contract",
"eth2/utils/eth2_config",
"eth2/utils/eth2_interop_keypairs",
"eth2/utils/logging",
@ -31,16 +32,15 @@ members = [
"beacon_node/rest_api",
"beacon_node/network",
"beacon_node/eth2-libp2p",
"beacon_node/rpc",
"beacon_node/version",
"beacon_node/eth1",
"beacon_node/beacon_chain",
"beacon_node/websocket_server",
"tests/beacon_chain_sim",
"tests/ef_tests",
"tests/eth1_test_rig",
"tests/node_test_rig",
"lcli",
"protos",
"validator_client",
"account_manager",
"lighthouse",


@ -1,29 +1,4 @@
FROM rust:latest
RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool awscli software-properties-common
RUN add-apt-repository -y ppa:git-core/ppa
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
RUN apt-get install -y git-lfs
RUN git clone https://github.com/google/protobuf.git && \
cd protobuf && \
./autogen.sh && \
./configure && \
make && \
make install && \
ldconfig && \
make clean && \
cd .. && \
rm -r protobuf
RUN apt-get install -y nodejs npm
RUN npm install -g ganache-cli --unsafe-perm
RUN mkdir -p /cache/cargocache && chmod -R ugo+rwX /cache/cargocache
ENV CARGO_HOME /cache/cargocache
RUN rustup component add rustfmt clippy
COPY . lighthouse
RUN cd lighthouse && make


@ -5,24 +5,37 @@ EF_TESTS = "tests/ef_tests"
# Builds the entire workspace in release (optimized).
#
# Binaries will most likely be found in `./target/release`
release:
cargo build --release --all
install:
cargo install --path lighthouse --force
# Runs the full workspace tests, without downloading any additional test
# Runs the full workspace tests in **release**, without downloading any additional
# test vectors.
test-release:
cargo test --all --release --exclude ef_tests
# Runs the full workspace tests in **debug**, without downloading any additional test
# vectors.
test:
cargo test --all --all-features --release --exclude ef_tests
test-debug:
cargo test --all --exclude ef_tests
# Runs cargo-fmt (linter).
cargo-fmt:
cargo fmt --all -- --check
# only run the ef-test vectors
run-ef-tests:
# Runs only the ef-test vectors.
run-ef-tests:
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto"
# Downloads and runs the EF test vectors.
test-ef: make-ef-tests run-ef-tests
# Runs the entire test suite, downloading test vectors if required.
test-full: test test-ef
# Runs the full workspace tests in release, without downloading any additional
# test vectors.
test: test-release
# Runs the entire test suite, downloading test vectors if required.
test-full: cargo-fmt test-release test-debug test-ef
# Runs the makefile in the `ef_tests` repo.
#


@ -4,8 +4,8 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
[![Build Status]][Build Link] [![Book Status]][Book Link] [![RustDoc Status]][RustDoc Link] [![Chat Badge]][Chat Link] [![Swagger Badge]][Swagger Link]
[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg
[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines
[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg
[Build Link]: https://github.com/sigp/lighthouse/actions
[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
[Chat Link]: https://discord.gg/cyAszAh
[Book Status]:https://img.shields.io/badge/user--docs-master-informational


@ -1,16 +1,25 @@
[package]
name = "account_manager"
version = "0.0.1"
authors = ["Luke Anderson <luke@sigmaprime.io>"]
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
edition = "2018"
[dev-dependencies]
tempdir = "0.3"
[dependencies]
bls = { path = "../eth2/utils/bls" }
clap = "2.33.0"
slog = "2.5.2"
slog-term = "2.4.2"
slog-async = "2.3.0"
validator_client = { path = "../validator_client" }
types = { path = "../eth2/types" }
dirs = "2.0.2"
environment = { path = "../lighthouse/environment" }
deposit_contract = { path = "../eth2/utils/deposit_contract" }
libc = "0.2.65"
eth2_ssz = { path = "../eth2/utils/ssz" }
eth2_ssz_derive = { path = "../eth2/utils/ssz_derive" }
hex = "0.4"
validator_client = { path = "../validator_client" }
rayon = "1.2.0"


@ -1,54 +1,47 @@
use clap::{App, Arg, SubCommand};
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
App::new("Account Manager")
.visible_aliases(&["am", "accounts", "accounts_manager"])
.version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>")
.about("Eth 2.0 Accounts Manager")
.arg(
Arg::with_name("logfile")
.long("logfile")
.value_name("logfile")
.help("File path where output will be written.")
.takes_value(true),
)
.arg(
Arg::with_name("datadir")
.long("datadir")
.short("d")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true),
)
App::new("account_manager")
.visible_aliases(&["a", "am", "account", "account_manager"])
.about("Utilities for generating and managing Ethereum 2.0 accounts.")
.subcommand(
SubCommand::with_name("generate")
.about("Generates a new validator private key")
.version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>"),
)
.subcommand(
SubCommand::with_name("generate_deterministic")
.about("Generates a deterministic validator private key FOR TESTING")
.version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>")
.arg(
Arg::with_name("validator index")
.long("index")
.short("i")
.value_name("index")
.help("The index of the validator, for which the test key is generated")
.takes_value(true)
.required(true),
SubCommand::with_name("validator")
.about("Generate or manage Etheruem 2.0 validators.")
.subcommand(
SubCommand::with_name("new")
.about("Create a new Ethereum 2.0 validator.")
.subcommand(
SubCommand::with_name("insecure")
.about("Produce insecure, ephemeral validators. DO NOT USE TO STORE VALUE.")
.arg(
Arg::with_name("first")
.index(1)
.value_name("INDEX")
.help("Index of the first validator")
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name("last")
.index(2)
.value_name("INDEX")
.help("Index of the first validator")
.takes_value(true)
.required(true),
),
)
.subcommand(
SubCommand::with_name("random")
.about("Produces public keys using entropy from the Rust 'rand' library.")
.arg(
Arg::with_name("validator_count")
.index(1)
.value_name("INTEGER")
.help("The number of new validators to generate.")
.takes_value(true)
.default_value("1"),
),
)
)
.arg(
Arg::with_name("validator count")
.long("validator_count")
.short("n")
.value_name("validator_count")
.help("If supplied along with `index`, generates keys `i..i + n`.")
.takes_value(true)
.default_value("1"),
),
)
}


@ -1,122 +1,150 @@
mod cli;
use bls::Keypair;
use clap::ArgMatches;
use environment::RuntimeContext;
use slog::{crit, debug, info};
use rayon::prelude::*;
use slog::{crit, info};
use std::fs;
use std::path::PathBuf;
use types::{test_utils::generate_deterministic_keypair, EthSpec};
use validator_client::Config as ValidatorClientConfig;
use types::{ChainSpec, EthSpec};
use validator_client::validator_directory::{ValidatorDirectory, ValidatorDirectoryBuilder};
pub use cli::cli_app;
pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator";
pub const CLIENT_CONFIG_FILENAME: &str = "account-manager.toml";
/// Run the account manager, logging an error if the operation did not succeed.
pub fn run<T: EthSpec>(matches: &ArgMatches, context: RuntimeContext<T>) {
let mut log = context.log;
let log = context.log.clone();
match run_account_manager(matches, context) {
Ok(()) => (),
Err(e) => crit!(log, "Account manager failed"; "error" => e),
}
}
let data_dir = match matches
/// Run the account manager, returning an error if the operation did not succeed.
fn run_account_manager<T: EthSpec>(
matches: &ArgMatches,
context: RuntimeContext<T>,
) -> Result<(), String> {
let log = context.log.clone();
let datadir = matches
.value_of("datadir")
.and_then(|v| Some(PathBuf::from(v)))
{
Some(v) => v,
None => {
// use the default
.map(PathBuf::from)
.unwrap_or_else(|| {
let mut default_dir = match dirs::home_dir() {
Some(v) => v,
None => {
crit!(log, "Failed to find a home directory");
return;
panic!("Failed to find a home directory");
}
};
default_dir.push(DEFAULT_DATA_DIR);
default_dir.push(".lighthouse");
default_dir.push("validators");
default_dir
}
};
});
// create the directory if needed
match fs::create_dir_all(&data_dir) {
Ok(_) => {}
Err(e) => {
crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e));
return;
}
}
fs::create_dir_all(&datadir).map_err(|e| format!("Failed to initialize datadir: {}", e))?;
let mut client_config = ValidatorClientConfig::default();
// Ensure the `data_dir` in the config matches that supplied to the CLI.
client_config.data_dir = data_dir.clone();
if let Err(e) = client_config.apply_cli_args(&matches, &mut log) {
crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => format!("{:?}", e));
return;
};
// Log configuration
info!(log, "";
"data_dir" => &client_config.data_dir.to_str());
info!(
log,
"Located data directory";
"path" => format!("{:?}", datadir)
);
match matches.subcommand() {
("generate", Some(_)) => generate_random(&client_config, &log),
("generate_deterministic", Some(m)) => {
if let Some(string) = m.value_of("validator index") {
let i: usize = string.parse().expect("Invalid validator index");
if let Some(string) = m.value_of("validator count") {
let n: usize = string.parse().expect("Invalid end validator count");
let indices: Vec<usize> = (i..i + n).collect();
generate_deterministic_multiple(&indices, &client_config, &log)
} else {
generate_deterministic(i, &client_config, &log)
}
("validator", Some(matches)) => match matches.subcommand() {
("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, context)?,
_ => {
return Err("Invalid 'validator new' command. See --help.".to_string());
}
},
_ => {
return Err("Invalid 'validator' command. See --help.".to_string());
}
}
Ok(())
}
/// Describes the crypto key generation methods for a validator.
enum KeygenMethod {
/// Produce an insecure "deterministic" keypair. Used only for interop and testing.
Insecure(usize),
/// Generate a new key from the `rand` thread random RNG.
ThreadRandom,
}
/// Process the subcommand for creating new validators.
fn run_new_validator_subcommand<T: EthSpec>(
matches: &ArgMatches,
datadir: PathBuf,
context: RuntimeContext<T>,
) -> Result<(), String> {
let log = context.log.clone();
let methods: Vec<KeygenMethod> = match matches.subcommand() {
("insecure", Some(matches)) => {
let first = matches
.value_of("first")
.ok_or_else(|| "No first index".to_string())?
.parse::<usize>()
.map_err(|e| format!("Unable to parse first index: {}", e))?;
let last = matches
.value_of("last")
.ok_or_else(|| "No last index".to_string())?
.parse::<usize>()
.map_err(|e| format!("Unable to parse first index: {}", e))?;
(first..last).map(KeygenMethod::Insecure).collect()
}
("random", Some(matches)) => {
let count = matches
.value_of("validator_count")
.ok_or_else(|| "No validator count".to_string())?
.parse::<usize>()
.map_err(|e| format!("Unable to parse validator count: {}", e))?;
(0..count).map(|_| KeygenMethod::ThreadRandom).collect()
}
_ => {
crit!(
log,
"The account manager must be run with a subcommand. See help for more information."
);
return Err("Invalid 'validator' command. See --help.".to_string());
}
}
}
};
fn generate_random(config: &ValidatorClientConfig, log: &slog::Logger) {
save_key(&Keypair::random(), config, log)
}
let validators = make_validators(datadir.clone(), &methods, context.eth2_config.spec)?;
fn generate_deterministic_multiple(
validator_indices: &[usize],
config: &ValidatorClientConfig,
log: &slog::Logger,
) {
for validator_index in validator_indices {
generate_deterministic(*validator_index, config, log)
}
}
fn generate_deterministic(
validator_index: usize,
config: &ValidatorClientConfig,
log: &slog::Logger,
) {
save_key(
&generate_deterministic_keypair(validator_index),
config,
info!(
log,
)
}
fn save_key(keypair: &Keypair, config: &ValidatorClientConfig, log: &slog::Logger) {
let key_path: PathBuf = config
.save_key(&keypair)
.expect("Unable to save newly generated private key.");
debug!(
log,
"Keypair generated {:?}, saved to: {:?}",
keypair.identifier(),
key_path.to_string_lossy()
"Generated validator directories";
"base_path" => format!("{:?}", datadir),
"count" => validators.len(),
);
Ok(())
}
/// Produces a validator directory for each of the key generation methods provided in `methods`.
fn make_validators(
datadir: PathBuf,
methods: &[KeygenMethod],
spec: ChainSpec,
) -> Result<Vec<ValidatorDirectory>, String> {
methods
.par_iter()
.map(|method| {
let mut builder = ValidatorDirectoryBuilder::default()
.spec(spec.clone())
.full_deposit_amount()?;
builder = match method {
KeygenMethod::Insecure(index) => builder.insecure_keypairs(*index),
KeygenMethod::ThreadRandom => builder.thread_random_keypairs(),
};
builder
.create_directory(datadir.clone())?
.write_keypair_files()?
.write_eth1_data_file()?
.build()
})
.collect()
}
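
A note on the parallelism here: `make_validators` above hands the list of keygen methods to rayon's `par_iter`, so each validator directory (keypair generation included) is built on a separate worker thread and the per-item `Result`s are folded into a single `Result`. A minimal standalone sketch of that collection pattern, with a hypothetical `fake_keygen` standing in for the real `ValidatorDirectoryBuilder` work:

use rayon::prelude::*;

// Hypothetical stand-in for building one validator directory.
fn fake_keygen(index: usize) -> Result<String, String> {
    Ok(format!("validator_{:03}", index))
}

fn main() -> Result<(), String> {
    let methods: Vec<usize> = (0..4).collect();

    // Collecting a parallel iterator of `Result`s into `Result<Vec<_>, _>`
    // yields `Err` if any item fails, mirroring how `make_validators`
    // propagates a single failure out of the parallel loop.
    let dirs = methods
        .par_iter()
        .map(|index| fake_keygen(*index))
        .collect::<Result<Vec<String>, String>>()?;

    println!("{:?}", dirs);
    Ok(())
}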


@ -28,6 +28,7 @@ sloggers = "0.3.4"
slot_clock = { path = "../../eth2/utils/slot_clock" }
eth2_hashing = "0.1.0"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../../eth2/utils/ssz_types" }
eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../eth2/state_processing" }
tree_hash = "0.1.0"


@ -10,7 +10,7 @@ use lmd_ghost::LmdGhost;
use operation_pool::DepositInsertStatus;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::RwLock;
use slog::{debug, error, info, trace, warn, Logger};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use state_processing::per_block_processing::{
@ -44,6 +44,7 @@ pub const GRAFFITI: &str = "sigp/lighthouse-0.0.0-prerelease";
const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files");
const BLOCK_SKIPPING_LOGGING_THRESHOLD: u64 = 3;
const BLOCK_SKIPPING_FAILURE_THRESHOLD: u64 = 128;
#[derive(Debug, PartialEq)]
pub enum BlockProcessingOutcome {
@ -74,6 +75,7 @@ pub enum BlockProcessingOutcome {
#[derive(Debug, PartialEq)]
pub enum AttestationProcessingOutcome {
Processed,
EmptyAggregationBitfield,
UnknownHeadBlock {
beacon_block_root: Hash256,
},
@ -175,7 +177,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<Vec<BeaconBlockBody<T::EthSpec>>, Error> {
let bodies: Result<Vec<_>, _> = roots
.iter()
.map(|root| match self.get_block(root)? {
.map(|root| match self.block_at_root(*root)? {
Some(block) => Ok(block.body),
None => Err(Error::DBInconsistent(format!("Missing block: {}", root))),
})
@ -190,7 +192,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn get_block_headers(&self, roots: &[Hash256]) -> Result<Vec<BeaconBlockHeader>, Error> {
let headers: Result<Vec<BeaconBlockHeader>, _> = roots
.iter()
.map(|root| match self.get_block(root)? {
.map(|root| match self.block_at_root(*root)? {
Some(block) => Ok(block.block_header()),
None => Err(Error::DBInconsistent("Missing block".into())),
})
@ -274,6 +276,36 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ReverseStateRootIterator::new((head.beacon_state_root, slot), iter)
}
/// Returns the block at the given root, if any.
///
/// ## Errors
///
/// May return a database error.
pub fn block_at_root(
&self,
block_root: Hash256,
) -> Result<Option<BeaconBlock<T::EthSpec>>, Error> {
Ok(self.store.get(&block_root)?)
}
/// Returns the block at the given slot, if any. Only returns blocks in the canonical chain.
///
/// ## Errors
///
/// May return a database error.
pub fn block_at_slot(&self, slot: Slot) -> Result<Option<BeaconBlock<T::EthSpec>>, Error> {
let root = self
.rev_iter_block_roots()
.find(|(_, this_slot)| *this_slot == slot)
.map(|(root, _)| root);
if let Some(block_root) = root {
Ok(self.store.get(&block_root)?)
} else {
Ok(None)
}
}
/// Returns the block at the given root, if any.
///
/// ## Errors
@ -318,7 +350,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if slot == head_state.slot {
Ok(head_state)
} else if slot > head_state.slot {
if slot > head_state.slot + BLOCK_SKIPPING_LOGGING_THRESHOLD {
// It is presently very resource intensive (lots of hashing) to skip slots.
//
// We log warnings or simply fail if there are too many skip slots. This is a
// protection against DoS attacks.
if slot > head_state.slot + BLOCK_SKIPPING_FAILURE_THRESHOLD {
crit!(
self.log,
"Refusing to skip more than {} blocks", BLOCK_SKIPPING_LOGGING_THRESHOLD;
"head_slot" => head_state.slot,
"request_slot" => slot
);
return Err(Error::StateSkipTooLarge {
head_slot: head_state.slot,
requested_slot: slot,
});
} else if slot > head_state.slot + BLOCK_SKIPPING_LOGGING_THRESHOLD {
warn!(
self.log,
"Skipping more than {} blocks", BLOCK_SKIPPING_LOGGING_THRESHOLD;
@ -326,6 +374,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
"request_slot" => slot
)
}
let head_state_slot = head_state.slot;
let mut state = head_state;
while state.slot < slot {
@ -476,6 +525,33 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}
/// Produce an `Attestation` that is valid for the given `slot` and `index`.
///
/// Always attests to the canonical chain.
pub fn produce_attestation(
&self,
slot: Slot,
index: CommitteeIndex,
) -> Result<Attestation<T::EthSpec>, Error> {
let state = self.state_at_slot(slot)?;
let head = self.head();
let data = self.produce_attestation_data_for_block(
index,
head.beacon_block_root,
head.beacon_block.slot,
&state,
)?;
let committee_len = state.get_beacon_committee(slot, index)?.committee.len();
Ok(Attestation {
aggregation_bits: BitList::with_capacity(committee_len)?,
data,
signature: AggregateSignature::new(),
})
}
/// Produce an `AttestationData` that is valid for the given `slot`, `index`.
///
/// Always attests to the canonical chain.
@ -635,6 +711,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS);
let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES);
if attestation.aggregation_bits.num_set_bits() == 0 {
return Ok(AttestationProcessingOutcome::EmptyAggregationBitfield);
}
// From the store, load the attestation's "head block".
//
// An honest validator would have set this block to be the head of the chain (i.e., the
@ -790,13 +870,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
result
};
if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) {
if block.slot > 0 && block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch())
{
// Ignore any attestation where the slot of `data.beacon_block_root` is equal to or
// prior to the finalized epoch.
//
// For any valid attestation if the `beacon_block_root` is prior to finalization, then
// all other parameters (source, target, etc) must all be prior to finalization and
// therefore no longer interesting.
//
// We allow the case where the block is the genesis block. Without this, all
// attestations prior to the first block being produced would be invalid.
Ok(AttestationProcessingOutcome::FinalizedSlot {
attestation: block.slot.epoch(T::EthSpec::slots_per_epoch()),
finalized: finalized_epoch,
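
The two thresholds introduced above gate how far `state_at_slot` will skip ahead of the head state: past `BLOCK_SKIPPING_LOGGING_THRESHOLD` it warns, and past `BLOCK_SKIPPING_FAILURE_THRESHOLD` it refuses with `StateSkipTooLarge`, since per-slot state advancement is hashing-heavy and unbounded skipping would be a DoS vector. A minimal sketch of that decision, with `Slot` simplified to a plain `u64` (the real code uses the `types::Slot` newtype and the logging macros):

const BLOCK_SKIPPING_LOGGING_THRESHOLD: u64 = 3;
const BLOCK_SKIPPING_FAILURE_THRESHOLD: u64 = 128;

#[derive(Debug, PartialEq)]
enum SkipDecision {
    /// Within the logging threshold: skip silently.
    Allow,
    /// Past the logging threshold: skip, but emit a warning.
    AllowWithWarning,
    /// Past the failure threshold: refuse the request outright.
    Refuse,
}

fn skip_decision(head_slot: u64, requested_slot: u64) -> SkipDecision {
    if requested_slot > head_slot + BLOCK_SKIPPING_FAILURE_THRESHOLD {
        SkipDecision::Refuse
    } else if requested_slot > head_slot + BLOCK_SKIPPING_LOGGING_THRESHOLD {
        SkipDecision::AllowWithWarning
    } else {
        SkipDecision::Allow
    }
}

fn main() {
    assert_eq!(skip_decision(10, 12), SkipDecision::Allow);
    assert_eq!(skip_decision(10, 20), SkipDecision::AllowWithWarning);
    assert_eq!(skip_decision(10, 200), SkipDecision::Refuse);
}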


@ -1,5 +1,6 @@
use crate::eth1_chain::Error as Eth1ChainError;
use crate::fork_choice::Error as ForkChoiceError;
use ssz_types::Error as SszTypesError;
use state_processing::per_block_processing::errors::AttestationValidationError;
use state_processing::BlockProcessingError;
use state_processing::SlotProcessingError;
@ -38,12 +39,18 @@ pub enum BeaconChainError {
beacon_block_root: Hash256,
},
AttestationValidationError(AttestationValidationError),
StateSkipTooLarge {
head_slot: Slot,
requested_slot: Slot,
},
/// Returned when an internal check fails, indicating corrupt data.
InvariantViolated(String),
SszTypesError(SszTypesError),
}
easy_from_to!(SlotProcessingError, BeaconChainError);
easy_from_to!(AttestationValidationError, BeaconChainError);
easy_from_to!(SszTypesError, BeaconChainError);
#[derive(Debug, PartialEq)]
pub enum BlockProductionError {


@ -13,7 +13,6 @@ beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" }
network = { path = "../network" }
eth2-libp2p = { path = "../eth2-libp2p" }
rpc = { path = "../rpc" }
rest_api = { path = "../rest_api" }
websocket_server = { path = "../websocket_server" }
prometheus = "0.7.0"


@ -19,7 +19,6 @@ use genesis::{
use lighthouse_bootstrap::Bootstrapper;
use lmd_ghost::LmdGhost;
use network::{NetworkConfig, NetworkMessage, Service as NetworkService};
use rpc::Config as RpcConfig;
use slog::{debug, error, info, warn};
use std::net::SocketAddr;
use std::path::Path;
@ -267,35 +266,6 @@ where
Ok(self)
}
/// Immediately starts the gRPC server (gRPC is soon to be deprecated).
pub fn grpc_server(mut self, config: &RpcConfig) -> Result<Self, String> {
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "grpc_server requires a beacon chain")?;
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "grpc_server requires a runtime_context")?
.service_context("grpc");
let network_send = self
.libp2p_network_send
.clone()
.ok_or_else(|| "grpc_server requires a libp2p network")?;
let exit_signal = rpc::start_server(
config,
&context.executor,
network_send,
beacon_chain,
context.log,
);
self.exit_signals.push(exit_signal);
Ok(self)
}
/// Immediately starts the beacon node REST API http server.
pub fn http_server(
mut self,
@ -305,7 +275,7 @@ where
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "grpc_server requires a beacon chain")?;
.ok_or_else(|| "http_server requires a beacon chain")?;
let context = self
.runtime_context
.as_ref()
@ -314,11 +284,11 @@ where
let network = self
.libp2p_network
.clone()
.ok_or_else(|| "grpc_server requires a libp2p network")?;
.ok_or_else(|| "http_server requires a libp2p network")?;
let network_send = self
.libp2p_network_send
.clone()
.ok_or_else(|| "grpc_server requires a libp2p network sender")?;
.ok_or_else(|| "http_server requires a libp2p network sender")?;
let network_info = rest_api::NetworkInfo {
network_service: network.clone(),


@ -51,7 +51,6 @@ pub struct Config {
/// via the CLI at runtime, instead of from a configuration file saved to disk.
pub genesis: ClientGenesis,
pub network: network::NetworkConfig,
pub rpc: rpc::Config,
pub rest_api: rest_api::Config,
pub websocket_server: websocket_server::Config,
pub eth1: eth1::Config,
@ -66,7 +65,6 @@ impl Default for Config {
db_name: "chain_db".to_string(),
genesis: <_>::default(),
network: NetworkConfig::new(),
rpc: <_>::default(),
rest_api: <_>::default(),
websocket_server: <_>::default(),
spec_constants: TESTNET_SPEC_CONSTANTS.into(),
@ -107,7 +105,6 @@ impl Config {
};
self.network.apply_cli_args(args)?;
self.rpc.apply_cli_args(args)?;
self.rest_api.apply_cli_args(args)?;
self.websocket_server.apply_cli_args(args)?;


@ -6,6 +6,7 @@ pub mod builder;
pub mod error;
use beacon_chain::BeaconChain;
use eth2_libp2p::{Enr, Multiaddr};
use exit_future::Signal;
use network::Service as NetworkService;
use std::net::SocketAddr;
@ -48,6 +49,16 @@ impl<T: BeaconChainTypes> Client<T> {
pub fn libp2p_listen_port(&self) -> Option<u16> {
self.libp2p_network.as_ref().map(|n| n.listen_port())
}
/// Returns the list of libp2p addresses the client is listening to.
pub fn libp2p_listen_addresses(&self) -> Option<Vec<Multiaddr>> {
self.libp2p_network.as_ref().map(|n| n.listen_multiaddrs())
}
/// Returns the local libp2p ENR of this node, for network discovery.
pub fn enr(&self) -> Option<Enr> {
self.libp2p_network.as_ref().map(|n| n.local_enr())
}
}
impl<T: BeaconChainTypes> Drop for Client<T> {


@ -7,9 +7,10 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
hex = "0.3"
#SigP repository
libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "2a9ded92db30dab7d3530c597a0a3b3458a7dfb7" }
enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "2a9ded92db30dab7d3530c597a0a3b3458a7dfb7", features = ["serde"] }
# rust-libp2p is presently being sourced from a Sigma Prime fork of the
# `libp2p/rust-libp2p` repository.
libp2p = { git = "https://github.com/sigp/rust-libp2p", rev = "2a9ded92db30dab7d3530c597a0a3b3458a7dfb7" }
enr = { git = "https://github.com/sigp/rust-libp2p/", rev = "2a9ded92db30dab7d3530c597a0a3b3458a7dfb7", features = ["serde"] }
types = { path = "../../eth2/types" }
serde = "1.0.102"
serde_derive = "1.0.102"


@ -1,7 +1,7 @@
[package]
name = "rest_api"
version = "0.1.0"
authors = ["Luke Anderson <luke@lukeanderson.com.au>"]
authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -12,28 +12,31 @@ network = { path = "../network" }
eth2-libp2p = { path = "../eth2-libp2p" }
store = { path = "../store" }
version = { path = "../version" }
serde = { version = "1.0.102", features = ["derive"] }
serde_json = "1.0.41"
serde_yaml = "0.8.11"
slog = "2.5.2"
slog-term = "2.4.2"
slog-async = "2.3.0"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.8"
slog = "2.5"
slog-term = "2.4"
slog-async = "2.3"
eth2_ssz = { path = "../../eth2/utils/ssz" }
eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
state_processing = { path = "../../eth2/state_processing" }
types = { path = "../../eth2/types" }
clap = "2.33.0"
http = "0.1.19"
prometheus = { version = "0.7.0", features = ["process"] }
hyper = "0.12.35"
clap = "2.33"
http = "0.1"
hyper = "0.12"
exit-future = "0.1.4"
tokio = "0.1.22"
url = "2.1.0"
lazy_static = "1.4.0"
url = "2.1"
lazy_static = "1.3.0"
eth2_config = { path = "../../eth2/utils/eth2_config" }
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
slot_clock = { path = "../../eth2/utils/slot_clock" }
hex = "0.3"
parking_lot = "0.9.0"
parking_lot = "0.9"
futures = "0.1.29"
[dev-dependencies]
remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" }
node_test_rig = { path = "../../tests/node_test_rig" }
tree_hash = { path = "../../eth2/utils/tree_hash" }


@ -3,13 +3,13 @@ use crate::response_builder::ResponseBuilder;
use crate::{ApiError, ApiResult, UrlQuery};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use hyper::{Body, Request};
use serde::Serialize;
use serde::{Deserialize, Serialize};
use ssz_derive::Encode;
use std::sync::Arc;
use store::Store;
use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot, Validator};
#[derive(Serialize, Encode)]
#[derive(Serialize, Deserialize, Encode)]
pub struct HeadResponse {
pub slot: Slot,
pub block_root: Hash256,
@ -23,12 +23,10 @@ pub struct HeadResponse {
}
/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
pub fn get_head<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = req
.extensions()
.get::<Arc<BeaconChain<T>>>()
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
pub fn get_head<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let chain_head = beacon_chain.head();
let head = HeadResponse {
@ -66,12 +64,10 @@ pub struct BlockResponse<T: EthSpec> {
}
/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
pub fn get_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = req
.extensions()
.get::<Arc<BeaconChain<T>>>()
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
pub fn get_block<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let query_params = ["root", "slot"];
let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
@ -106,9 +102,10 @@ pub fn get_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult
}
/// HTTP handler to return a `BeaconBlock` root at a given `slot`.
pub fn get_block_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_block_root<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
let target = parse_slot(&slot_string)?;
@ -120,8 +117,10 @@ pub fn get_block_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiR
}
/// HTTP handler to return the `Fork` of the current head.
pub fn get_fork<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_fork<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.fork)
}
@ -129,9 +128,10 @@ pub fn get_fork<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult
///
/// The `Epoch` parameter can be any epoch number. If it is not specified,
/// the current epoch is assumed.
pub fn get_validators<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_validators<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let epoch = match UrlQuery::from_request(&req) {
// We have some parameters, so make sure it's the epoch one and parse it
Ok(query) => query
@ -168,8 +168,10 @@ pub struct StateResponse<T: EthSpec> {
///
/// Will not return a state if the request slot is in the future. Will return states higher than
/// the current head by skipping slots.
pub fn get_state<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_state<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let head_state = beacon_chain.head().beacon_state;
let (key, value) = match UrlQuery::from_request(&req) {
@ -214,9 +216,10 @@ pub fn get_state<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult
///
/// Will not return a state if the request slot is in the future. Will return states higher than
/// the current head by skipping slots.
pub fn get_state_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_state_root<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
let slot = parse_slot(&slot_string)?;
@ -226,10 +229,10 @@ pub fn get_state_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiR
}
/// HTTP handler to return the highest finalized slot.
pub fn get_current_finalized_checkpoint<T: BeaconChainTypes + 'static>(
pub fn get_current_finalized_checkpoint<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
let head_state = beacon_chain.head().beacon_state;
let checkpoint = head_state.finalized_checkpoint.clone();
@ -238,10 +241,19 @@ pub fn get_current_finalized_checkpoint<T: BeaconChainTypes + 'static>(
}
/// HTTP handler to return a `BeaconState` at the genesis block.
pub fn get_genesis_state<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_genesis_state<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?;
ResponseBuilder::new(&req)?.body(&state)
}
/// Read the genesis time from the current beacon chain state.
pub fn get_genesis_time<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.genesis_time)
}


@ -2,6 +2,34 @@ use clap::ArgMatches;
use serde::{Deserialize, Serialize};
use std::net::Ipv4Addr;
/// Defines the encoding for the API.
#[derive(Clone, Serialize, Deserialize, Copy)]
pub enum ApiEncodingFormat {
JSON,
YAML,
SSZ,
}
impl ApiEncodingFormat {
pub fn get_content_type(&self) -> &str {
match self {
ApiEncodingFormat::JSON => "application/json",
ApiEncodingFormat::YAML => "application/yaml",
ApiEncodingFormat::SSZ => "application/ssz",
}
}
}
impl From<&str> for ApiEncodingFormat {
fn from(f: &str) -> ApiEncodingFormat {
match f {
"application/yaml" => ApiEncodingFormat::YAML,
"application/ssz" => ApiEncodingFormat::SSZ,
_ => ApiEncodingFormat::JSON,
}
}
}
/// HTTP REST API Configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
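
With the `From<&str>` and `get_content_type` implementations shown above, an HTTP `Accept` header value can be turned into an encoding and back into a response content-type in two calls. A short usage sketch, assuming the `rest_api` crate (which re-exports `ApiEncodingFormat` from its `config` module) is available as a dependency:

use rest_api::ApiEncodingFormat;

fn main() {
    // Unknown or missing Accept headers fall back to JSON.
    let default_format = ApiEncodingFormat::from("text/html");
    assert_eq!(default_format.get_content_type(), "application/json");

    // An explicit YAML request is answered with a YAML content-type.
    let yaml_format = ApiEncodingFormat::from("application/yaml");
    assert_eq!(yaml_format.get_content_type(), "application/yaml");
}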


@ -10,13 +10,16 @@ use http::header;
use hyper::{Body, Request};
use network::NetworkMessage;
use parking_lot::RwLock;
use ssz::Encode;
use ssz::{Decode, Encode};
use std::sync::Arc;
use store::{iter::AncestorIter, Store};
use tokio::sync::mpsc;
use types::{Attestation, BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot};
use types::{
Attestation, BeaconBlock, BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch,
Signature, Slot,
};
/// Parse a slot from a `0x` preixed string.
/// Parse a slot.
///
/// E.g., `"1234"`
pub fn parse_slot(string: &str) -> Result<Slot, ApiError> {
@ -26,6 +29,25 @@ pub fn parse_slot(string: &str) -> Result<Slot, ApiError> {
.map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e)))
}
/// Parse an epoch.
///
/// E.g., `"13"`
pub fn parse_epoch(string: &str) -> Result<Epoch, ApiError> {
string
.parse::<u64>()
.map(Epoch::from)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e)))
}
/// Parse a CommitteeIndex.
///
/// E.g., `"18"`
pub fn parse_committee_index(string: &str) -> Result<CommitteeIndex, ApiError> {
string
.parse::<CommitteeIndex>()
.map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e)))
}
/// Checks the provided request to ensure that the `content-type` header is valid.
///
/// The content-type header should either be omitted, in which case JSON is assumed, or it should
@ -41,6 +63,23 @@ pub fn check_content_type_for_json(req: &Request<Body>) -> Result<(), ApiError>
}
}
/// Parse a signature from a `0x` prefixed string.
pub fn parse_signature(string: &str) -> Result<Signature, ApiError> {
const PREFIX: &str = "0x";
if string.starts_with(PREFIX) {
let trimmed = string.trim_start_matches(PREFIX);
let bytes = hex::decode(trimmed)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse signature hex: {:?}", e)))?;
Signature::from_ssz_bytes(&bytes)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse signature bytes: {:?}", e)))
} else {
Err(ApiError::BadRequest(
"Signature must have a 0x prefix".to_string(),
))
}
}
/// Parse a root from a `0x` prefixed string.
///
/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"`
@ -54,7 +93,7 @@ pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
.map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e)))
} else {
Err(ApiError::BadRequest(
"Root must have a '0x' prefix".to_string(),
"Root must have a 0x prefix".to_string(),
))
}
}
@ -71,7 +110,7 @@ pub fn parse_pubkey(string: &str) -> Result<PublicKey, ApiError> {
Ok(pubkey)
} else {
Err(ApiError::BadRequest(
"Public key must have a '0x' prefix".to_string(),
"Public key must have a 0x prefix".to_string(),
))
}
}
@ -194,26 +233,6 @@ pub fn implementation_pending_response(_req: Request<Body>) -> ApiResult {
))
}
pub fn get_beacon_chain_from_request<T: BeaconChainTypes + 'static>(
req: &Request<Body>,
) -> Result<(Arc<BeaconChain<T>>), ApiError> {
// Get beacon state
let beacon_chain = req
.extensions()
.get::<Arc<BeaconChain<T>>>()
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?;
Ok(beacon_chain.clone())
}
pub fn get_logger_from_request(req: &Request<Body>) -> slog::Logger {
let log = req
.extensions()
.get::<slog::Logger>()
.expect("Should always get the logger from the request, since we put it in there.");
log.to_owned()
}
pub fn publish_beacon_block_to_network<T: BeaconChainTypes + 'static>(
chan: Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>,
block: BeaconBlock<T::EthSpec>,
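
The new `0x`-prefixed parsers above (`parse_root`, `parse_pubkey`, `parse_signature`) all share the same first step: require the prefix, strip it, then hex-decode and SSZ-decode the remainder into the target type. A standalone sketch of that prefix handling with the decoding elided (`strip_0x` is a hypothetical helper, not part of the crate):

fn strip_0x(string: &str) -> Result<&str, String> {
    const PREFIX: &str = "0x";
    if string.starts_with(PREFIX) {
        // The real helpers go on to `hex::decode` this slice and then
        // SSZ-decode the bytes into a Hash256, PublicKey or Signature.
        Ok(string.trim_start_matches(PREFIX))
    } else {
        Err("value must have a 0x prefix".to_string())
    }
}

fn main() {
    assert_eq!(strip_0x("0xdeadbeef"), Ok("deadbeef"));
    assert!(strip_0x("deadbeef").is_err());
}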


@ -5,13 +5,14 @@ extern crate lazy_static;
extern crate network as client_network;
mod beacon;
mod config;
pub mod config;
mod error;
mod helpers;
mod metrics;
mod network;
mod node;
mod response_builder;
mod router;
mod spec;
mod url_query;
mod validator;
@ -19,12 +20,13 @@ mod validator;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use client_network::NetworkMessage;
use client_network::Service as NetworkService;
pub use config::ApiEncodingFormat;
use error::{ApiError, ApiResult};
use eth2_config::Eth2Config;
use futures::future::IntoFuture;
use hyper::rt::Future;
use hyper::service::Service;
use hyper::{Body, Method, Request, Response, Server};
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use parking_lot::RwLock;
use slog::{info, warn};
use std::net::SocketAddr;
@ -35,167 +37,19 @@ use tokio::runtime::TaskExecutor;
use tokio::sync::mpsc;
use url_query::UrlQuery;
pub use crate::helpers::parse_pubkey;
pub use beacon::{BlockResponse, HeadResponse, StateResponse};
pub use config::Config;
pub use validator::{BulkValidatorDutiesRequest, ValidatorDuty};
type BoxFut = Box<dyn Future<Item = Response<Body>, Error = ApiError> + Send>;
pub struct ApiService<T: BeaconChainTypes + 'static> {
log: slog::Logger,
beacon_chain: Arc<BeaconChain<T>>,
db_path: DBPath,
network_service: Arc<NetworkService<T>>,
network_channel: Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>,
eth2_config: Arc<Eth2Config>,
}
pub type BoxFut = Box<dyn Future<Item = Response<Body>, Error = ApiError> + Send>;
pub type NetworkChannel = Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>;
pub struct NetworkInfo<T: BeaconChainTypes> {
pub network_service: Arc<NetworkService<T>>,
pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
}
fn into_boxfut<F: IntoFuture + 'static>(item: F) -> BoxFut
where
F: IntoFuture<Item = Response<Body>, Error = ApiError>,
F::Future: Send,
{
Box::new(item.into_future())
}
impl<T: BeaconChainTypes> Service for ApiService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = ApiError;
type Future = BoxFut;
fn call(&mut self, mut req: Request<Body>) -> Self::Future {
metrics::inc_counter(&metrics::REQUEST_COUNT);
let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME);
// Add all the useful bits into the request, so that we can pull them out in the individual
// functions.
req.extensions_mut()
.insert::<slog::Logger>(self.log.clone());
req.extensions_mut()
.insert::<Arc<BeaconChain<T>>>(self.beacon_chain.clone());
req.extensions_mut().insert::<DBPath>(self.db_path.clone());
req.extensions_mut()
.insert::<Arc<NetworkService<T>>>(self.network_service.clone());
req.extensions_mut()
.insert::<Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>>(
self.network_channel.clone(),
);
req.extensions_mut()
.insert::<Arc<Eth2Config>>(self.eth2_config.clone());
let path = req.uri().path().to_string();
// Route the request to the correct handler.
let result = match (req.method(), path.as_ref()) {
// Methods for Client
(&Method::GET, "/node/version") => into_boxfut(node::get_version(req)),
(&Method::GET, "/node/genesis_time") => into_boxfut(node::get_genesis_time::<T>(req)),
(&Method::GET, "/node/syncing") => {
into_boxfut(helpers::implementation_pending_response(req))
}
// Methods for Network
(&Method::GET, "/network/enr") => into_boxfut(network::get_enr::<T>(req)),
(&Method::GET, "/network/peer_count") => into_boxfut(network::get_peer_count::<T>(req)),
(&Method::GET, "/network/peer_id") => into_boxfut(network::get_peer_id::<T>(req)),
(&Method::GET, "/network/peers") => into_boxfut(network::get_peer_list::<T>(req)),
(&Method::GET, "/network/listen_port") => {
into_boxfut(network::get_listen_port::<T>(req))
}
(&Method::GET, "/network/listen_addresses") => {
into_boxfut(network::get_listen_addresses::<T>(req))
}
// Methods for Beacon Node
(&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::<T>(req)),
(&Method::GET, "/beacon/block") => into_boxfut(beacon::get_block::<T>(req)),
(&Method::GET, "/beacon/block_root") => into_boxfut(beacon::get_block_root::<T>(req)),
(&Method::GET, "/beacon/blocks") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::<T>(req)),
(&Method::GET, "/beacon/attestations") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/attestations/pending") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/validators") => into_boxfut(beacon::get_validators::<T>(req)),
(&Method::GET, "/beacon/validators/indicies") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/validators/pubkeys") => {
into_boxfut(helpers::implementation_pending_response(req))
}
// Methods for Validator
(&Method::GET, "/beacon/validator/duties") => {
into_boxfut(validator::get_validator_duties::<T>(req))
}
(&Method::GET, "/beacon/validator/block") => {
into_boxfut(validator::get_new_beacon_block::<T>(req))
}
(&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::<T>(req),
(&Method::GET, "/beacon/validator/attestation") => {
into_boxfut(validator::get_new_attestation::<T>(req))
}
(&Method::POST, "/beacon/validator/attestation") => {
validator::publish_attestation::<T>(req)
}
(&Method::GET, "/beacon/state") => into_boxfut(beacon::get_state::<T>(req)),
(&Method::GET, "/beacon/state_root") => into_boxfut(beacon::get_state_root::<T>(req)),
(&Method::GET, "/beacon/state/current_finalized_checkpoint") => {
into_boxfut(beacon::get_current_finalized_checkpoint::<T>(req))
}
(&Method::GET, "/beacon/state/genesis") => {
into_boxfut(beacon::get_genesis_state::<T>(req))
}
//TODO: Add aggregate/filtered state lookups here, e.g. /beacon/validators/balances
// Methods for bootstrap and checking configuration
(&Method::GET, "/spec") => into_boxfut(spec::get_spec::<T>(req)),
(&Method::GET, "/spec/slots_per_epoch") => {
into_boxfut(spec::get_slots_per_epoch::<T>(req))
}
(&Method::GET, "/spec/deposit_contract") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/spec/eth2_config") => into_boxfut(spec::get_eth2_config::<T>(req)),
(&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::<T>(req)),
_ => Box::new(futures::future::err(ApiError::NotFound(
"Request path and/or method not found.".to_owned(),
))),
};
let response = match result.wait() {
// Return the `hyper::Response`.
Ok(response) => {
metrics::inc_counter(&metrics::SUCCESS_COUNT);
slog::debug!(self.log, "Request successful: {:?}", path);
response
}
// Map the `ApiError` into `hyper::Response`.
Err(e) => {
slog::debug!(self.log, "Request failure: {:?}", path);
e.into()
}
};
metrics::stop_timer(timer);
Box::new(futures::future::ok(response))
}
}
pub fn start_server<T: BeaconChainTypes>(
config: &Config,
executor: &TaskExecutor,
@ -205,46 +59,54 @@ pub fn start_server<T: BeaconChainTypes>(
eth2_config: Eth2Config,
log: slog::Logger,
) -> Result<(exit_future::Signal, SocketAddr), hyper::Error> {
// build a channel to kill the HTTP server
let (exit_signal, exit) = exit_future::signal();
let exit_log = log.clone();
let server_exit = exit.and_then(move |_| {
info!(exit_log, "API service shutdown");
Ok(())
});
let db_path = DBPath(db_path);
// Get the address to bind to
let bind_addr = (config.listen_address, config.port).into();
// Clone our stateful objects, for use in service closure.
let server_log = log.clone();
let server_bc = beacon_chain.clone();
let inner_log = log.clone();
let eth2_config = Arc::new(eth2_config);
let service = move || -> futures::future::FutureResult<ApiService<T>, String> {
futures::future::ok(ApiService {
log: server_log.clone(),
beacon_chain: server_bc.clone(),
db_path: db_path.clone(),
network_service: network_info.network_service.clone(),
network_channel: Arc::new(RwLock::new(network_info.network_chan.clone())),
eth2_config: eth2_config.clone(),
// Define the function that will build the request handler.
let make_service = make_service_fn(move |_socket: &AddrStream| {
let beacon_chain = beacon_chain.clone();
let log = inner_log.clone();
let eth2_config = eth2_config.clone();
let network_service = network_info.network_service.clone();
let network_channel = Arc::new(RwLock::new(network_info.network_chan.clone()));
let db_path = db_path.clone();
service_fn(move |req: Request<Body>| {
router::route(
req,
beacon_chain.clone(),
network_service.clone(),
network_channel.clone(),
eth2_config.clone(),
log.clone(),
db_path.clone(),
)
})
};
});
let log_clone = log.clone();
let server = Server::bind(&bind_addr).serve(service);
let bind_addr = (config.listen_address, config.port).into();
let server = Server::bind(&bind_addr).serve(make_service);
// Determine the address the server is actually listening on.
//
// This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free
// port).
let actual_listen_addr = server.local_addr();
// Build a channel to kill the HTTP server.
let (exit_signal, exit) = exit_future::signal();
let inner_log = log.clone();
let server_exit = exit.and_then(move |_| {
info!(inner_log, "API service shutdown");
Ok(())
});
// Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered.
let inner_log = log.clone();
let server_future = server
.with_graceful_shutdown(server_exit)
.map_err(move |e| {
warn!(
log_clone,
inner_log,
"API failed to start, Unable to bind"; "address" => format!("{:?}", e)
)
});
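
The server construction above captures the refactor's main idea: rather than stashing shared objects in `Request::extensions` and digging them back out inside each handler, the `make_service_fn` closure clones the `Arc` handles and `router::route` passes them to handlers as explicit arguments. A minimal std-only sketch of that shape (`FakeChain` and the handler names are illustrative stand-ins, not the crate's own types):

use std::sync::Arc;

// Illustrative stand-in for the shared BeaconChain handle.
struct FakeChain {
    genesis_time: u64,
}

// Handlers receive shared state as explicit arguments...
fn get_genesis_time(chain: Arc<FakeChain>) -> Result<String, String> {
    Ok(chain.genesis_time.to_string())
}

// ...and the router clones the handles it hands to each handler.
fn route(path: &str, chain: Arc<FakeChain>) -> Result<String, String> {
    match path {
        "/node/genesis_time" => get_genesis_time(chain),
        _ => Err("Request path and/or method not found.".to_string()),
    }
}

fn main() {
    let chain = Arc::new(FakeChain { genesis_time: 1_576_800_000 });
    assert_eq!(route("/node/genesis_time", chain.clone()).unwrap(), "1576800000");
    assert!(route("/not_a_route", chain).is_err());
}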


@ -1,9 +1,10 @@
use crate::helpers::get_beacon_chain_from_request;
use crate::response_builder::ResponseBuilder;
use crate::{ApiError, ApiResult, DBPath};
use beacon_chain::BeaconChainTypes;
use crate::{ApiError, ApiResult};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use hyper::{Body, Request};
use prometheus::{Encoder, TextEncoder};
use lighthouse_metrics::{Encoder, TextEncoder};
use std::path::PathBuf;
use std::sync::Arc;
pub use lighthouse_metrics::*;
@ -27,16 +28,14 @@ lazy_static! {
/// # Note
///
/// This is a HTTP handler method.
pub fn get_prometheus<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
pub fn get_prometheus<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
db_path: PathBuf,
) -> ApiResult {
let mut buffer = vec![];
let encoder = TextEncoder::new();
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
let db_path = req
.extensions()
.get::<DBPath>()
.ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?;
// There are two categories of metrics:
//
// - Dynamically updated: things like histograms and event counters that are updated on the


@ -9,11 +9,10 @@ use std::sync::Arc;
/// HTTP handler to return the list of libp2p multiaddr the client is listening on.
///
/// Returns a list of `Multiaddr`, serialized according to their `serde` impl.
pub fn get_listen_addresses<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there");
pub fn get_listen_addresses<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
let multiaddresses: Vec<Multiaddr> = network.listen_multiaddrs();
ResponseBuilder::new(&req)?.body_no_ssz(&multiaddresses)
}
@ -21,54 +20,48 @@ pub fn get_listen_addresses<T: BeaconChainTypes>(req: Request<Body>) -> ApiResul
/// HTTP handler to return the network port the client is listening on.
///
/// Returns the TCP port number in its plain form (which is also valid JSON serialization)
pub fn get_listen_port<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there")
.clone();
pub fn get_listen_port<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body(&network.listen_port())
}
/// HTTP handler to return the Discv5 ENR from the client's libp2p service.
///
/// ENR is encoded as base64 string.
pub fn get_enr<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there");
pub fn get_enr<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&network.local_enr().to_base64())
}
/// HTTP handler to return the `PeerId` from the client's libp2p service.
///
/// PeerId is encoded as base58 string.
pub fn get_peer_id<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there");
pub fn get_peer_id<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&network.local_peer_id().to_base58())
}
/// HTTP handler to return the number of peers connected in the client's libp2p service.
pub fn get_peer_count<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there");
pub fn get_peer_count<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body(&network.connected_peers())
}
/// HTTP handler to return the list of peers connected to the client's libp2p service.
///
/// Peers are presented as a list of `PeerId::to_string()`.
pub fn get_peer_list<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
let network = req
.extensions()
.get::<Arc<NetworkService<T>>>()
.expect("The network service should always be there, we put it there");
pub fn get_peer_list<T: BeaconChainTypes>(
req: Request<Body>,
network: Arc<NetworkService<T>>,
) -> ApiResult {
let connected_peers: Vec<String> = network
.connected_peer_set()
.iter()


@ -1,7 +1,5 @@
use crate::helpers::get_beacon_chain_from_request;
use crate::response_builder::ResponseBuilder;
use crate::ApiResult;
use beacon_chain::BeaconChainTypes;
use hyper::{Body, Request};
use version;
@ -9,9 +7,3 @@ use version;
pub fn get_version(req: Request<Body>) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&version::version())
}
/// Read the genesis time from the current beacon chain state.
pub fn get_genesis_time<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.genesis_time)
}


@ -1,47 +1,36 @@
use super::{ApiError, ApiResult};
use crate::config::ApiEncodingFormat;
use http::header;
use hyper::{Body, Request, Response, StatusCode};
use serde::Serialize;
use ssz::Encode;
pub enum Encoding {
JSON,
SSZ,
YAML,
TEXT,
}
pub struct ResponseBuilder {
encoding: Encoding,
encoding: ApiEncodingFormat,
}
impl ResponseBuilder {
pub fn new(req: &Request<Body>) -> Result<Self, ApiError> {
let content_header: String = req
let accept_header: String = req
.headers()
.get(header::CONTENT_TYPE)
.get(header::ACCEPT)
.map_or(Ok(""), |h| h.to_str())
.map_err(|e| {
ApiError::BadRequest(format!(
"The content-type header contains invalid characters: {:?}",
"The Accept header contains invalid characters: {:?}",
e
))
})
.map(String::from)?;
// JSON is our default encoding, unless something else is requested.
let encoding = match content_header {
ref h if h.starts_with("application/ssz") => Encoding::SSZ,
ref h if h.starts_with("application/yaml") => Encoding::YAML,
ref h if h.starts_with("text/") => Encoding::TEXT,
_ => Encoding::JSON,
};
let encoding = ApiEncodingFormat::from(accept_header.as_str());
Ok(Self { encoding })
}
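
Content negotiation now keys off the request's `Accept` header rather than `Content-Type`, with JSON as the fallback. A rough, self-contained sketch of that mapping; the enum and the `From<&str>` impl below are simplified stand-ins for the real `ApiEncodingFormat` in the `config` module:

#[derive(Debug, PartialEq)]
enum ApiEncodingFormat {
    JSON,
    YAML,
    SSZ,
}

impl From<&str> for ApiEncodingFormat {
    fn from(accept: &str) -> Self {
        // JSON is the default unless the client explicitly asks for SSZ or YAML.
        match accept {
            h if h.starts_with("application/ssz") => ApiEncodingFormat::SSZ,
            h if h.starts_with("application/yaml") => ApiEncodingFormat::YAML,
            _ => ApiEncodingFormat::JSON,
        }
    }
}

fn main() {
    assert_eq!(ApiEncodingFormat::from("application/yaml"), ApiEncodingFormat::YAML);
    assert_eq!(ApiEncodingFormat::from("text/html; charset=utf-8"), ApiEncodingFormat::JSON);
}
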
pub fn body<T: Serialize + Encode>(self, item: &T) -> ApiResult {
match self.encoding {
Encoding::SSZ => Response::builder()
ApiEncodingFormat::SSZ => Response::builder()
.status(StatusCode::OK)
.header("content-type", "application/ssz")
.body(Body::from(item.as_ssz_bytes()))
@ -52,7 +41,7 @@ impl ResponseBuilder {
pub fn body_no_ssz<T: Serialize>(self, item: &T) -> ApiResult {
let (body, content_type) = match self.encoding {
Encoding::JSON => (
ApiEncodingFormat::JSON => (
Body::from(serde_json::to_string(&item).map_err(|e| {
ApiError::ServerError(format!(
"Unable to serialize response body as JSON: {:?}",
@ -61,12 +50,12 @@ impl ResponseBuilder {
})?),
"application/json",
),
Encoding::SSZ => {
ApiEncodingFormat::SSZ => {
return Err(ApiError::UnsupportedType(
"Response cannot be encoded as SSZ.".into(),
));
}
Encoding::YAML => (
ApiEncodingFormat::YAML => (
Body::from(serde_yaml::to_string(&item).map_err(|e| {
ApiError::ServerError(format!(
"Unable to serialize response body as YAML: {:?}",
@ -75,11 +64,6 @@ impl ResponseBuilder {
})?),
"application/yaml",
),
Encoding::TEXT => {
return Err(ApiError::UnsupportedType(
"Response cannot be encoded as plain text.".into(),
));
}
};
Response::builder()


@ -0,0 +1,172 @@
use crate::{
beacon, error::ApiError, helpers, metrics, network, node, spec, validator, BoxFut,
NetworkChannel,
};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use client_network::Service as NetworkService;
use eth2_config::Eth2Config;
use futures::{Future, IntoFuture};
use hyper::{Body, Error, Method, Request, Response};
use slog::debug;
use std::path::PathBuf;
use std::sync::Arc;
fn into_boxfut<F: IntoFuture + 'static>(item: F) -> BoxFut
where
F: IntoFuture<Item = Response<Body>, Error = ApiError>,
F::Future: Send,
{
Box::new(item.into_future())
}
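
`into_boxfut` is just a coercion helper: with futures 0.1, synchronous handlers that return a plain `Result` and asynchronous handlers that already return a boxed future all need to end up as the same trait-object type before the router can match on them. A standalone sketch of the same shape, with `String` standing in for `Response<Body>`/`ApiError` and an extra `'static` bound added so the example compiles on its own:

use futures::{Future, IntoFuture}; // futures = "0.1"

type BoxFut = Box<dyn Future<Item = String, Error = String> + Send>;

fn into_boxfut<F>(item: F) -> BoxFut
where
    F: IntoFuture<Item = String, Error = String>,
    F::Future: Send + 'static,
{
    Box::new(item.into_future())
}

fn main() {
    // A plain `Result` implements `IntoFuture`, so synchronous handlers can be
    // boxed alongside genuinely asynchronous ones.
    let ok = into_boxfut(Ok("response".to_string()));
    let err = into_boxfut(Err("not found".to_string()));
    assert_eq!(ok.wait().unwrap(), "response");
    assert!(err.wait().is_err());
}
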
pub fn route<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
network_service: Arc<NetworkService<T>>,
network_channel: NetworkChannel,
eth2_config: Arc<Eth2Config>,
local_log: slog::Logger,
db_path: PathBuf,
) -> impl Future<Item = Response<Body>, Error = Error> {
metrics::inc_counter(&metrics::REQUEST_COUNT);
let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME);
let path = req.uri().path().to_string();
let log = local_log.clone();
let request_result: Box<dyn Future<Item = Response<_>, Error = _> + Send> =
match (req.method(), path.as_ref()) {
// Methods for Client
(&Method::GET, "/node/version") => into_boxfut(node::get_version(req)),
(&Method::GET, "/node/syncing") => {
into_boxfut(helpers::implementation_pending_response(req))
}
// Methods for Network
(&Method::GET, "/network/enr") => {
into_boxfut(network::get_enr::<T>(req, network_service))
}
(&Method::GET, "/network/peer_count") => {
into_boxfut(network::get_peer_count::<T>(req, network_service))
}
(&Method::GET, "/network/peer_id") => {
into_boxfut(network::get_peer_id::<T>(req, network_service))
}
(&Method::GET, "/network/peers") => {
into_boxfut(network::get_peer_list::<T>(req, network_service))
}
(&Method::GET, "/network/listen_port") => {
into_boxfut(network::get_listen_port::<T>(req, network_service))
}
(&Method::GET, "/network/listen_addresses") => {
into_boxfut(network::get_listen_addresses::<T>(req, network_service))
}
// Methods for Beacon Node
(&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::<T>(req, beacon_chain)),
(&Method::GET, "/beacon/block") => {
into_boxfut(beacon::get_block::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/block_root") => {
into_boxfut(beacon::get_block_root::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/blocks") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::<T>(req, beacon_chain)),
(&Method::GET, "/beacon/attestations") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/attestations/pending") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/genesis_time") => {
into_boxfut(beacon::get_genesis_time::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/validators") => {
into_boxfut(beacon::get_validators::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/validators/indicies") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/beacon/validators/pubkeys") => {
into_boxfut(helpers::implementation_pending_response(req))
}
// Methods for Validator
(&Method::GET, "/validator/duties") => {
into_boxfut(validator::get_validator_duties::<T>(req, beacon_chain))
}
(&Method::POST, "/validator/duties") => {
validator::post_validator_duties::<T>(req, beacon_chain)
}
(&Method::GET, "/validator/block") => {
into_boxfut(validator::get_new_beacon_block::<T>(req, beacon_chain))
}
(&Method::POST, "/validator/block") => {
validator::publish_beacon_block::<T>(req, beacon_chain, network_channel, log)
}
(&Method::GET, "/validator/attestation") => {
into_boxfut(validator::get_new_attestation::<T>(req, beacon_chain))
}
(&Method::POST, "/validator/attestation") => {
validator::publish_attestation::<T>(req, beacon_chain, network_channel, log)
}
(&Method::GET, "/beacon/state") => {
into_boxfut(beacon::get_state::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/state_root") => {
into_boxfut(beacon::get_state_root::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/state/current_finalized_checkpoint") => into_boxfut(
beacon::get_current_finalized_checkpoint::<T>(req, beacon_chain),
),
(&Method::GET, "/beacon/state/genesis") => {
into_boxfut(beacon::get_genesis_state::<T>(req, beacon_chain))
}
//TODO: Add aggregate/filtered state lookups here, e.g. /beacon/validators/balances
// Methods for bootstrap and checking configuration
(&Method::GET, "/spec") => into_boxfut(spec::get_spec::<T>(req, beacon_chain)),
(&Method::GET, "/spec/slots_per_epoch") => {
into_boxfut(spec::get_slots_per_epoch::<T>(req))
}
(&Method::GET, "/spec/deposit_contract") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/spec/eth2_config") => {
into_boxfut(spec::get_eth2_config::<T>(req, eth2_config))
}
(&Method::GET, "/metrics") => {
into_boxfut(metrics::get_prometheus::<T>(req, beacon_chain, db_path))
}
_ => Box::new(futures::future::err(ApiError::NotFound(
"Request path and/or method not found.".to_owned(),
))),
};
// Map the Rust-friendly `Result` in to a http-friendly response. In effect, this ensures that
// any `Err` returned from our response handlers becomes a valid http response to the client
// (e.g., a response with a 404 or 500 status).
request_result.then(move |result| match result {
Ok(response) => {
debug!(local_log, "Request successful: {:?}", path);
metrics::inc_counter(&metrics::SUCCESS_COUNT);
metrics::stop_timer(timer);
Ok(response)
}
Err(e) => {
let error_response = e.into();
debug!(local_log, "Request failure: {:?}", path);
metrics::stop_timer(timer);
Ok(error_response)
}
})
}


@ -1,30 +1,28 @@
use super::ApiResult;
use crate::helpers::get_beacon_chain_from_request;
use crate::response_builder::ResponseBuilder;
use crate::ApiError;
use beacon_chain::BeaconChainTypes;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_config::Eth2Config;
use hyper::{Body, Request};
use std::sync::Arc;
use types::EthSpec;
/// HTTP handler to return the full spec object.
pub fn get_spec<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_spec<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(&beacon_chain.spec)
}
/// HTTP handler to return the full Eth2Config object.
pub fn get_eth2_config<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let eth2_config = req
.extensions()
.get::<Arc<Eth2Config>>()
.ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?;
pub fn get_eth2_config<T: BeaconChainTypes>(
req: Request<Body>,
eth2_config: Arc<Eth2Config>,
) -> ApiResult {
ResponseBuilder::new(&req)?.body_no_ssz(eth2_config.as_ref())
}
/// HTTP handler to return the full spec object.
pub fn get_slots_per_epoch<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
pub fn get_slots_per_epoch<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
ResponseBuilder::new(&req)?.body(&T::EthSpec::slots_per_epoch())
}
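
`get_slots_per_epoch` never touches the chain; the value comes straight from the `EthSpec` type parameter. A toy version of that pattern (the trait and the value below are illustrative stand-ins, not the real `EthSpec`):

// Illustrative stand-in for the EthSpec trait; the real slots-per-epoch values
// come from the spec presets.
trait EthSpec {
    fn slots_per_epoch() -> u64;
}

struct MinimalEthSpec;

impl EthSpec for MinimalEthSpec {
    fn slots_per_epoch() -> u64 {
        8
    }
}

fn get_slots_per_epoch<E: EthSpec>() -> u64 {
    E::slots_per_epoch()
}

fn main() {
    assert_eq!(get_slots_per_epoch::<MinimalEthSpec>(), 8);
}
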


@ -1,5 +1,7 @@
use crate::helpers::{parse_committee_index, parse_epoch, parse_signature, parse_slot};
use crate::ApiError;
use hyper::Request;
use types::{CommitteeIndex, Epoch, Signature, Slot};
/// Provides handy functions for parsing the query parameters of a URL.
@ -77,6 +79,30 @@ impl<'a> UrlQuery<'a> {
.collect();
Ok(queries)
}
/// Returns the value of the first occurrence of the `epoch` key.
pub fn epoch(self) -> Result<Epoch, ApiError> {
self.first_of(&["epoch"])
.and_then(|(_key, value)| parse_epoch(&value))
}
/// Returns the value of the first occurrence of the `slot` key.
pub fn slot(self) -> Result<Slot, ApiError> {
self.first_of(&["slot"])
.and_then(|(_key, value)| parse_slot(&value))
}
/// Returns the value of the first occurrence of the `committee_index` key.
pub fn committee_index(self) -> Result<CommitteeIndex, ApiError> {
self.first_of(&["committee_index"])
.and_then(|(_key, value)| parse_committee_index(&value))
}
/// Returns the value of the first occurrence of the `randao_reveal` key.
pub fn randao_reveal(self) -> Result<Signature, ApiError> {
self.first_of(&["randao_reveal"])
.and_then(|(_key, value)| parse_signature(&value))
}
}
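
These helpers are thin wrappers around `first_of` plus a parser, so callers like the validator endpoints can write `query.slot()?` instead of repeating the parse-and-map boilerplate. A rough standalone sketch of the same convenience-wrapper idea over a raw query string, with simplified `Slot`/`Epoch` newtypes and `String` errors in place of the real types:

// Simplified stand-ins for the real Slot/Epoch newtypes.
#[derive(Debug, PartialEq)]
struct Slot(u64);
#[derive(Debug, PartialEq)]
struct Epoch(u64);

/// Return the value of the first occurrence of `key` in a raw query string.
fn first_of<'a>(query: &'a str, key: &str) -> Result<&'a str, String> {
    query
        .split('&')
        .filter_map(|pair| {
            let mut parts = pair.splitn(2, '=');
            match (parts.next(), parts.next()) {
                (Some(k), Some(v)) if k == key => Some(v),
                _ => None,
            }
        })
        .next()
        .ok_or_else(|| format!("missing query key: {}", key))
}

fn slot(query: &str) -> Result<Slot, String> {
    first_of(query, "slot")?
        .parse::<u64>()
        .map(Slot)
        .map_err(|e| format!("slot must be a u64: {}", e))
}

fn epoch(query: &str) -> Result<Epoch, String> {
    first_of(query, "epoch")?
        .parse::<u64>()
        .map(Epoch)
        .map_err(|e| format!("epoch must be a u64: {}", e))
}

fn main() {
    let q = "slot=42&epoch=5";
    assert_eq!(slot(q).unwrap(), Slot(42));
    assert_eq!(epoch(q).unwrap(), Epoch(5));
}
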
#[cfg(test)]


@ -1,96 +1,127 @@
use crate::helpers::{
check_content_type_for_json, get_beacon_chain_from_request, get_logger_from_request,
parse_pubkey, publish_attestation_to_network, publish_beacon_block_to_network,
check_content_type_for_json, parse_pubkey, publish_attestation_to_network,
publish_beacon_block_to_network,
};
use crate::response_builder::ResponseBuilder;
use crate::{ApiError, ApiResult, BoxFut, UrlQuery};
use beacon_chain::{AttestationProcessingOutcome, BeaconChainTypes, BlockProcessingOutcome};
use bls::{AggregateSignature, PublicKey, Signature};
use crate::{ApiError, ApiResult, BoxFut, NetworkChannel, UrlQuery};
use beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome,
};
use bls::PublicKey;
use futures::future::Future;
use futures::stream::Stream;
use hyper::{Body, Request};
use network::NetworkMessage;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use slog::{info, trace, warn};
use slog::{info, warn, Logger};
use ssz_derive::{Decode, Encode};
use std::sync::Arc;
use tokio;
use tokio::sync::mpsc;
use types::beacon_state::EthSpec;
use types::{Attestation, BeaconBlock, BitList, CommitteeIndex, Epoch, RelativeEpoch, Slot};
use types::{Attestation, BeaconBlock, CommitteeIndex, Epoch, RelativeEpoch, Slot};
#[derive(Debug, Serialize, Deserialize)]
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)]
pub struct ValidatorDuty {
/// The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._
pub validator_pubkey: String,
pub validator_pubkey: PublicKey,
/// The slot at which the validator must attest.
pub attestation_slot: Option<Slot>,
/// The index of the committee within `slot` of which the validator is a member.
pub attestation_committee_index: Option<CommitteeIndex>,
/// The position of the validator in the committee.
pub attestation_committee_position: Option<usize>,
/// The slot in which a validator must propose a block, or `null` if block production is not required.
pub block_proposal_slot: Option<Slot>,
}
impl ValidatorDuty {
pub fn new() -> ValidatorDuty {
ValidatorDuty {
validator_pubkey: "".to_string(),
attestation_slot: None,
attestation_committee_index: None,
block_proposal_slot: None,
}
}
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct BulkValidatorDutiesRequest {
pub epoch: Epoch,
pub pubkeys: Vec<PublicKey>,
}
/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This
/// method allows for collecting bulk sets of validator duties without risking exceeding the max
/// URL length with query pairs.
pub fn post_validator_duties<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> BoxFut {
let response_builder = ResponseBuilder::new(&req);
let future = req
.into_body()
.concat2()
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))
.and_then(|chunks| {
serde_json::from_slice::<BulkValidatorDutiesRequest>(&chunks).map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into BulkValidatorDutiesRequest: {:?}",
e
))
})
})
.and_then(|bulk_request| {
return_validator_duties(beacon_chain, bulk_request.epoch, bulk_request.pubkeys)
})
.and_then(|duties| response_builder?.body_no_ssz(&duties));
Box::new(future)
}
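
For reference, the request body this endpoint deserializes is just `BulkValidatorDutiesRequest` as JSON: an epoch plus a list of public keys. A rough sketch of what a client might serialize and POST; the struct below is a simplified stand-in with a hex-string pubkey placeholder, since the real `PublicKey` encoding comes from the `bls` crate's serde impl:

use serde::Serialize; // serde = "1.0" with the "derive" feature; serde_json = "1.0"

// Simplified stand-in for BulkValidatorDutiesRequest: epoch as a bare u64 and
// pubkeys as plain strings, rather than the real Epoch / PublicKey types.
#[derive(Serialize)]
struct BulkDutiesRequest {
    epoch: u64,
    pubkeys: Vec<String>,
}

fn main() {
    let request = BulkDutiesRequest {
        epoch: 0,
        pubkeys: vec![
            "0xa99a...".to_string(), // hypothetical, truncated pubkey
        ],
    };
    let body = serde_json::to_string(&request).expect("should serialize");
    // For this stand-in struct: {"epoch":0,"pubkeys":["0xa99a..."]}
    println!("{}", body);
}
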
/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch
pub fn get_validator_duties<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let log = get_logger_from_request(&req);
slog::trace!(log, "Validator duties requested of API: {:?}", &req);
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
let mut head_state = beacon_chain.head().beacon_state;
slog::trace!(log, "Got head state from request.");
// Parse and check query parameters
///
/// The given `epoch` must be within one epoch of the current epoch.
pub fn get_validator_duties<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let query = UrlQuery::from_request(&req)?;
let current_epoch = head_state.current_epoch();
let epoch = match query.first_of(&["epoch"]) {
Ok((_, v)) => {
slog::trace!(log, "Requested epoch {:?}", v);
Epoch::new(v.parse::<u64>().map_err(|e| {
slog::info!(log, "Invalid epoch {:?}", e);
ApiError::BadRequest(format!("Invalid epoch parameter, must be a u64. {:?}", e))
})?)
}
Err(_) => {
// epoch not supplied, use the current epoch
slog::info!(log, "Using default epoch {:?}", current_epoch);
current_epoch
}
};
let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|e| {
slog::info!(log, "Requested epoch out of range.");
ApiError::BadRequest(format!(
"Cannot get RelativeEpoch, epoch out of range: {:?}",
e
))
})?;
let validators: Vec<PublicKey> = query
let epoch = query.epoch()?;
let validator_pubkeys = query
.all_of("validator_pubkeys")?
.iter()
.map(|pk| parse_pubkey(pk))
.collect::<Result<Vec<_>, _>>()?;
let mut duties: Vec<ValidatorDuty> = Vec::new();
.map(|validator_pubkey_str| parse_pubkey(validator_pubkey_str))
.collect::<Result<_, _>>()?;
// Build cache for the requested epoch
head_state
let duties = return_validator_duties(beacon_chain, epoch, validator_pubkeys)?;
ResponseBuilder::new(&req)?.body_no_ssz(&duties)
}
fn return_validator_duties<T: BeaconChainTypes>(
beacon_chain: Arc<BeaconChain<T>>,
epoch: Epoch,
validator_pubkeys: Vec<PublicKey>,
) -> Result<Vec<ValidatorDuty>, ApiError> {
let mut state = beacon_chain
.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
.map_err(|e| {
ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e))
})?;
let current_epoch = state.current_epoch();
let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|_| {
ApiError::BadRequest(format!(
"Epoch must be within one epoch of the current epoch",
))
})?;
state
.build_committee_cache(relative_epoch, &beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;
// Get a list of all validators for this epoch
let validator_proposers: Vec<usize> = epoch
state
.update_pubkey_cache()
.map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?;
// Get a list of all validators for this epoch.
//
// Used for quickly determining the slot for a proposer.
let validator_proposers: Vec<(usize, Slot)> = epoch
.slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| {
head_state
state
.get_beacon_proposer_index(slot, &beacon_chain.spec)
.map(|i| (i, slot))
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to get proposer index for validator: {:?}",
@ -98,83 +129,59 @@ pub fn get_validator_duties<T: BeaconChainTypes + 'static>(req: Request<Body>) -
))
})
})
.collect::<Result<Vec<usize>, _>>()?;
.collect::<Result<Vec<_>, _>>()?;
// Look up duties for each validator
for val_pk in validators {
let mut duty = ValidatorDuty::new();
duty.validator_pubkey = val_pk.as_hex_string();
validator_pubkeys
.into_iter()
.map(|validator_pubkey| {
if let Some(validator_index) =
state.get_validator_index(&validator_pubkey).map_err(|e| {
ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e))
})?
{
let duties = state
.get_attestation_duties(validator_index, relative_epoch)
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to obtain attestation duties: {:?}",
e
))
})?;
// Get the validator index
// If it does not exist in the index, just add a null duty and move on.
let val_index: usize = match head_state.get_validator_index(&val_pk) {
Ok(Some(i)) => i,
Ok(None) => {
duties.append(&mut vec![duty]);
continue;
let block_proposal_slot = validator_proposers
.iter()
.find(|(i, _slot)| validator_index == *i)
.map(|(_i, slot)| *slot);
Ok(ValidatorDuty {
validator_pubkey,
attestation_slot: duties.map(|d| d.slot),
attestation_committee_index: duties.map(|d| d.index),
attestation_committee_position: duties.map(|d| d.committee_position),
block_proposal_slot,
})
} else {
Ok(ValidatorDuty {
validator_pubkey,
attestation_slot: None,
attestation_committee_index: None,
attestation_committee_position: None,
block_proposal_slot: None,
})
}
Err(e) => {
return Err(ApiError::ServerError(format!(
"Unable to read validator index cache. {:?}",
e
)));
}
};
// Set attestation duties
match head_state.get_attestation_duties(val_index, relative_epoch) {
Ok(Some(d)) => {
duty.attestation_slot = Some(d.slot);
duty.attestation_committee_index = Some(d.index);
}
Ok(None) => {}
Err(e) => {
return Err(ApiError::ServerError(format!(
"unable to read cache for attestation duties: {:?}",
e
)))
}
};
// If the validator is to propose a block, identify the slot
if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) {
duty.block_proposal_slot = Some(Slot::new(
relative_epoch
.into_epoch(current_epoch)
.start_slot(T::EthSpec::slots_per_epoch())
.as_u64()
+ slot as u64,
));
}
duties.append(&mut vec![duty]);
}
ResponseBuilder::new(&req)?.body_no_ssz(&duties)
})
.collect::<Result<Vec<_>, ApiError>>()
}
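
A small point worth noting in the function above: proposers are computed once per epoch as `(validator_index, slot)` pairs, and each validator's `block_proposal_slot` is then a simple `find` over that list, replacing the old positional arithmetic. A toy version of that lookup with made-up numbers:

// Toy stand-ins: validator indices are usize, slots are u64.
fn proposal_slot(validator_index: usize, validator_proposers: &[(usize, u64)]) -> Option<u64> {
    validator_proposers
        .iter()
        .find(|(i, _slot)| *i == validator_index)
        .map(|(_i, slot)| *slot)
}

fn main() {
    // One proposer per slot in an 8-slot epoch (values are made up).
    let proposers: Vec<(usize, u64)> = vec![
        (3, 0), (7, 1), (0, 2), (5, 3), (3, 4), (1, 5), (6, 6), (2, 7),
    ];
    assert_eq!(proposal_slot(7, &proposers), Some(1));
    assert_eq!(proposal_slot(4, &proposers), None);
    // A validator can propose more than once per epoch; like the code above,
    // this returns only the first matching slot.
    assert_eq!(proposal_slot(3, &proposers), Some(0));
}
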
/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator.
pub fn get_new_beacon_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
pub fn get_new_beacon_block<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let query = UrlQuery::from_request(&req)?;
let slot = query
.first_of(&["slot"])
.map(|(_key, value)| value)?
.parse::<u64>()
.map(Slot::from)
.map_err(|e| {
ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e))
})?;
let randao_bytes = query
.first_of(&["randao_reveal"])
.map(|(_key, value)| value)
.map(hex::decode)?
.map_err(|e| {
ApiError::BadRequest(format!("Invalid hex string for randao_reveal: {:?}", e))
})?;
let randao_reveal = Signature::from_bytes(randao_bytes.as_slice()).map_err(|e| {
ApiError::BadRequest(format!("randao_reveal is not a valid signature: {:?}", e))
})?;
let slot = query.slot()?;
let randao_reveal = query.randao_reveal()?;
let (new_block, _state) = beacon_chain
.produce_block(randao_reveal, slot)
@ -189,35 +196,21 @@ pub fn get_new_beacon_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -
}
/// HTTP Handler to publish a BeaconBlock, which has been signed by a validator.
pub fn publish_beacon_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -> BoxFut {
pub fn publish_beacon_block<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
network_chan: NetworkChannel,
log: Logger,
) -> BoxFut {
try_future!(check_content_type_for_json(&req));
let log = get_logger_from_request(&req);
let beacon_chain = try_future!(get_beacon_chain_from_request::<T>(&req));
// Get the network sending channel from the request, for later transmission
let network_chan = req
.extensions()
.get::<Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>>()
.expect("Should always get the network channel from the request, since we put it in there.")
.clone();
let response_builder = ResponseBuilder::new(&req);
let body = req.into_body();
trace!(
log,
"Got the request body, now going to parse it into a block."
);
Box::new(body
.concat2()
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e)))
.map(|chunk| chunk.iter().cloned().collect::<Vec<u8>>())
.and_then(|chunks| {
serde_json::from_slice(&chunks.as_slice()).map_err(|e| {
ApiError::BadRequest(format!(
"Unable to deserialize JSON into a BeaconBlock: {:?}",
e
))
})
serde_json::from_slice(&chunks).map_err(|e| ApiError::BadRequest(format!("Unable to parse JSON into BeaconBlock: {:?}",e)))
})
.and_then(move |block: BeaconBlock<T::EthSpec>| {
let slot = block.slot;
@ -248,131 +241,34 @@ pub fn publish_beacon_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -
}
/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator.
pub fn get_new_attestation<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
let beacon_chain = get_beacon_chain_from_request::<T>(&req)?;
let mut head_state = beacon_chain.head().beacon_state;
pub fn get_new_attestation<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
) -> ApiResult {
let query = UrlQuery::from_request(&req)?;
let val_pk_str = query
.first_of(&["validator_pubkey"])
.map(|(_key, value)| value)?;
let val_pk = parse_pubkey(val_pk_str.as_str())?;
head_state
.update_pubkey_cache()
.map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?;
// Get the validator index from the supplied public key
// If it does not exist in the index, we cannot continue.
let val_index = head_state
.get_validator_index(&val_pk)
.map_err(|e| {
ApiError::ServerError(format!("Unable to read validator index cache. {:?}", e))
})?
.ok_or_else(|| {
ApiError::BadRequest(
"The provided validator public key does not correspond to a validator index."
.into(),
)
})?;
let slot = query.slot()?;
let index = query.committee_index()?;
// Build cache for the requested epoch
head_state
.build_committee_cache(RelativeEpoch::Current, &beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;
// Get the duties of the validator, to make sure they match up.
// If they don't have duties this epoch, then return an error
let val_duty = head_state
.get_attestation_duties(val_index, RelativeEpoch::Current)
.map_err(|e| {
ApiError::ServerError(format!(
"unable to read cache for attestation duties: {:?}",
e
))
})?
.ok_or_else(|| ApiError::BadRequest("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into()))?;
// Check that we are requesting an attestation during the slot where it is relevant.
let present_slot = beacon_chain.slot().map_err(|e| ApiError::ServerError(
format!("Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun. {:?}", e)
))?;
if val_duty.slot != present_slot {
return Err(ApiError::BadRequest(format!("Validator is only able to request an attestation during the slot they are allocated. Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot)));
}
// Parse the POC bit and insert it into the aggregation bits
let poc_bit = query
.first_of(&["poc_bit"])
.map(|(_key, value)| value)?
.parse::<bool>()
.map_err(|e| {
ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e))
})?;
let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len)
.expect("An empty BitList should always be created, or we have bigger problems.");
aggregation_bits
.set(val_duty.committee_position, poc_bit)
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to set aggregation bits for the attestation: {:?}",
e
))
})?;
// Allow a provided slot parameter to check against the expected slot as a sanity check only.
// Presently, we don't support attestations at future or past slots.
let requested_slot = query
.first_of(&["slot"])
.map(|(_key, value)| value)?
.parse::<u64>()
.map(Slot::from)
.map_err(|e| {
ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e))
})?;
let current_slot = beacon_chain.head().beacon_state.slot.as_u64();
if requested_slot != current_slot {
return Err(ApiError::BadRequest(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot)));
}
let index = query
.first_of(&["index"])
.map(|(_key, value)| value)?
.parse::<u64>()
.map_err(|e| ApiError::BadRequest(format!("Index is not a valid u64 value: {:?}", e)))?;
let attestation_data = beacon_chain
.produce_attestation_data(current_slot.into(), index)
.map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?;
let attestation: Attestation<T::EthSpec> = Attestation {
aggregation_bits,
data: attestation_data,
signature: AggregateSignature::new(),
};
let attestation = beacon_chain
.produce_attestation(slot, index)
.map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e)))?;
ResponseBuilder::new(&req)?.body(&attestation)
}
/// HTTP Handler to publish an Attestation, which has been signed by a validator.
pub fn publish_attestation<T: BeaconChainTypes + 'static>(req: Request<Body>) -> BoxFut {
pub fn publish_attestation<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
network_chan: NetworkChannel,
log: Logger,
) -> BoxFut {
try_future!(check_content_type_for_json(&req));
let log = get_logger_from_request(&req);
let beacon_chain = try_future!(get_beacon_chain_from_request::<T>(&req));
// Get the network sending channel from the request, for later transmission
let network_chan = req
.extensions()
.get::<Arc<RwLock<mpsc::UnboundedSender<NetworkMessage>>>>()
.expect("Should always get the network channel from the request, since we put it in there.")
.clone();
let response_builder = ResponseBuilder::new(&req);
let body = req.into_body();
trace!(
log,
"Got the request body, now going to parse it into an attestation."
);
Box::new(body
Box::new(req
.into_body()
.concat2()
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e)))
.map(|chunk| chunk.iter().cloned().collect::<Vec<u8>>())


@ -0,0 +1,571 @@
#![cfg(test)]
use beacon_chain::{BeaconChain, BeaconChainTypes};
use node_test_rig::{
environment::{Environment, EnvironmentBuilder},
testing_client_config, ClientGenesis, LocalBeaconNode,
};
use remote_beacon_node::{PublishStatus, ValidatorDuty};
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{
test_utils::generate_deterministic_keypair, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec,
MinimalEthSpec, PublicKey, RelativeEpoch, Signature, Slot,
};
use version;
type E = MinimalEthSpec;
fn build_env() -> Environment<E> {
EnvironmentBuilder::minimal()
.null_logger()
.expect("should build env logger")
.single_thread_tokio_runtime()
.expect("should start tokio runtime")
.build()
.expect("environment should build")
}
/// Returns the randao reveal for the given slot (assuming the given `beacon_chain` uses
/// deterministic keypairs).
fn get_randao_reveal<T: BeaconChainTypes>(
beacon_chain: Arc<BeaconChain<T>>,
slot: Slot,
spec: &ChainSpec,
) -> Signature {
let fork = beacon_chain.head().beacon_state.fork.clone();
let proposer_index = beacon_chain
.block_proposer(slot)
.expect("should get proposer index");
let keypair = generate_deterministic_keypair(proposer_index);
let epoch = slot.epoch(E::slots_per_epoch());
let message = epoch.tree_hash_root();
let domain = spec.get_domain(epoch, Domain::Randao, &fork);
Signature::new(&message, domain, &keypair.sk)
}
/// Signs the given block (assuming the given `beacon_chain` uses deterministic keypairs).
fn sign_block<T: BeaconChainTypes>(
beacon_chain: Arc<BeaconChain<T>>,
block: &mut BeaconBlock<T::EthSpec>,
spec: &ChainSpec,
) {
let fork = beacon_chain.head().beacon_state.fork.clone();
let proposer_index = beacon_chain
.block_proposer(block.slot)
.expect("should get proposer index");
let keypair = generate_deterministic_keypair(proposer_index);
block.sign(&keypair.sk, &fork, spec);
}
#[test]
fn validator_produce_attestation() {
let mut env = build_env();
let spec = &E::default_spec();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let beacon_chain = node
.client
.beacon_chain()
.expect("client should have beacon chain");
let state = beacon_chain.head().beacon_state.clone();
let validator_index = 0;
let duties = state
.get_attestation_duties(validator_index, RelativeEpoch::Current)
.expect("should have attestation duties cache")
.expect("should have attestation duties");
let mut attestation = env
.runtime()
.block_on(
remote_node
.http
.validator()
.produce_attestation(duties.slot, duties.index),
)
.expect("should fetch attestation from http api");
assert_eq!(
attestation.data.index, duties.index,
"should have same index"
);
assert_eq!(attestation.data.slot, duties.slot, "should have same slot");
assert_eq!(
attestation.aggregation_bits.num_set_bits(),
0,
"should have empty aggregation bits"
);
let keypair = generate_deterministic_keypair(validator_index);
// Fetch the duties again, but via HTTP for authenticity.
let duties = env
.runtime()
.block_on(remote_node.http.validator().get_duties(
attestation.data.slot.epoch(E::slots_per_epoch()),
&[keypair.pk.clone()],
))
.expect("should fetch duties from http api");
let duties = &duties[0];
// Try publishing the attestation without a signature, ensure it is flagged as invalid.
let publish_status = env
.runtime()
.block_on(
remote_node
.http
.validator()
.publish_attestation(attestation.clone()),
)
.expect("should publish attestation");
assert!(
!publish_status.is_valid(),
"the unsigned published attestation should not be valid"
);
attestation
.sign(
&keypair.sk,
duties
.attestation_committee_position
.expect("should have committee position"),
&state.fork,
spec,
)
.expect("should sign attestation");
// Try publishing the valid attestation.
let publish_status = env
.runtime()
.block_on(
remote_node
.http
.validator()
.publish_attestation(attestation.clone()),
)
.expect("should publish attestation");
assert!(
publish_status.is_valid(),
"the signed published attestation should be valid"
);
}
#[test]
fn validator_duties_bulk() {
let mut env = build_env();
let spec = &E::default_spec();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let beacon_chain = node
.client
.beacon_chain()
.expect("client should have beacon chain");
let epoch = Epoch::new(0);
let validators = beacon_chain
.head()
.beacon_state
.validators
.iter()
.map(|v| v.pubkey.clone())
.collect::<Vec<_>>();
let duties = env
.runtime()
.block_on(
remote_node
.http
.validator()
.get_duties_bulk(epoch, &validators),
)
.expect("should fetch duties from http api");
check_duties(duties, epoch, validators, beacon_chain, spec);
}
#[test]
fn validator_duties() {
let mut env = build_env();
let spec = &E::default_spec();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let beacon_chain = node
.client
.beacon_chain()
.expect("client should have beacon chain");
let epoch = Epoch::new(0);
let validators = beacon_chain
.head()
.beacon_state
.validators
.iter()
.map(|v| v.pubkey.clone())
.collect::<Vec<_>>();
let duties = env
.runtime()
.block_on(remote_node.http.validator().get_duties(epoch, &validators))
.expect("should fetch duties from http api");
check_duties(duties, epoch, validators, beacon_chain, spec);
}
fn check_duties<T: BeaconChainTypes>(
duties: Vec<ValidatorDuty>,
epoch: Epoch,
validators: Vec<PublicKey>,
beacon_chain: Arc<BeaconChain<T>>,
spec: &ChainSpec,
) {
assert_eq!(
validators.len(),
duties.len(),
"there should be a duty for each validator"
);
let state = beacon_chain.head().beacon_state.clone();
validators
.iter()
.zip(duties.iter())
.for_each(|(validator, duty)| {
assert_eq!(*validator, duty.validator_pubkey, "pubkey should match");
let validator_index = state
.get_validator_index(validator)
.expect("should have pubkey cache")
.expect("pubkey should exist");
let attestation_duty = state
.get_attestation_duties(validator_index, RelativeEpoch::Current)
.expect("should have attestation duties cache")
.expect("should have attestation duties");
assert_eq!(
Some(attestation_duty.slot),
duty.attestation_slot,
"attestation slot should match"
);
assert_eq!(
Some(attestation_duty.index),
duty.attestation_committee_index,
"attestation index should match"
);
if let Some(slot) = duty.block_proposal_slot {
let expected_proposer = state
.get_beacon_proposer_index(slot, spec)
.expect("should know proposer");
assert_eq!(
expected_proposer, validator_index,
"should get correct proposal slot"
);
} else {
epoch.slot_iter(E::slots_per_epoch()).for_each(|slot| {
let slot_proposer = state
.get_beacon_proposer_index(slot, spec)
.expect("should know proposer");
assert!(
slot_proposer != validator_index,
"validator should not have proposal slot in this epoch"
)
})
}
});
}
#[test]
fn validator_block_post() {
let mut env = build_env();
let spec = &E::default_spec();
let mut config = testing_client_config();
config.genesis = ClientGenesis::Interop {
validator_count: 8,
genesis_time: 13_371_337,
};
let node = LocalBeaconNode::production(env.core_context(), config);
let remote_node = node.remote_node().expect("should produce remote node");
let beacon_chain = node
.client
.beacon_chain()
.expect("client should have beacon chain");
let slot = Slot::new(1);
let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec);
let mut block = env
.runtime()
.block_on(
remote_node
.http
.validator()
.produce_block(slot, randao_reveal.clone()),
)
.expect("should fetch block from http api");
// Try publishing the block without a signature, ensure it is flagged as invalid.
let publish_status = env
.runtime()
.block_on(remote_node.http.validator().publish_block(block.clone()))
.expect("should publish block");
assert!(
!publish_status.is_valid(),
"the unsigned published block should not be valid"
);
sign_block(beacon_chain.clone(), &mut block, spec);
let block_root = block.canonical_root();
let publish_status = env
.runtime()
.block_on(remote_node.http.validator().publish_block(block.clone()))
.expect("should publish block");
assert_eq!(
publish_status,
PublishStatus::Valid,
"the signed published block should be valid"
);
let head = env
.runtime()
.block_on(remote_node.http.beacon().get_head())
.expect("should get head");
assert_eq!(
head.block_root, block_root,
"the published block should become the head block"
);
}
#[test]
fn validator_block_get() {
let mut env = build_env();
let spec = &E::default_spec();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let beacon_chain = node
.client
.beacon_chain()
.expect("client should have beacon chain");
let slot = Slot::new(1);
let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec);
let block = env
.runtime()
.block_on(
remote_node
.http
.validator()
.produce_block(slot, randao_reveal.clone()),
)
.expect("should fetch block from http api");
let (expected_block, _state) = node
.client
.beacon_chain()
.expect("client should have beacon chain")
.produce_block(randao_reveal, slot)
.expect("should produce block");
assert_eq!(
block, expected_block,
"the block returned from the API should be as expected"
);
}
#[test]
fn beacon_state() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let (state_by_slot, root) = env
.runtime()
.block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0)))
.expect("should fetch state from http api");
let (state_by_root, root_2) = env
.runtime()
.block_on(remote_node.http.beacon().get_state_by_root(root))
.expect("should fetch state from http api");
let mut db_state = node
.client
.beacon_chain()
.expect("client should have beacon chain")
.state_at_slot(Slot::new(0))
.expect("should find state");
db_state.drop_all_caches();
assert_eq!(
root, root_2,
"the two roots returned from the api should be identical"
);
assert_eq!(
root,
db_state.canonical_root(),
"root from database should match that from the API"
);
assert_eq!(
state_by_slot, db_state,
"genesis state by slot from api should match that from the DB"
);
assert_eq!(
state_by_root, db_state,
"genesis state by root from api should match that from the DB"
);
}
#[test]
fn beacon_block() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let (block_by_slot, root) = env
.runtime()
.block_on(remote_node.http.beacon().get_block_by_slot(Slot::new(0)))
.expect("should fetch block from http api");
let (block_by_root, root_2) = env
.runtime()
.block_on(remote_node.http.beacon().get_block_by_root(root))
.expect("should fetch block from http api");
let db_block = node
.client
.beacon_chain()
.expect("client should have beacon chain")
.block_at_slot(Slot::new(0))
.expect("should find block")
.expect("block should not be none");
assert_eq!(
root, root_2,
"the two roots returned from the api should be identical"
);
assert_eq!(
root,
db_block.canonical_root(),
"root from database should match that from the API"
);
assert_eq!(
block_by_slot, db_block,
"genesis block by slot from api should match that from the DB"
);
assert_eq!(
block_by_root, db_block,
"genesis block by root from api should match that from the DB"
);
}
#[test]
fn genesis_time() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let genesis_time = env
.runtime()
.block_on(remote_node.http.beacon().get_genesis_time())
.expect("should fetch genesis time from http api");
assert_eq!(
node.client
.beacon_chain()
.expect("should have beacon chain")
.head()
.beacon_state
.genesis_time,
genesis_time,
"should match genesis time from head state"
);
}
#[test]
fn fork() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let fork = env
.runtime()
.block_on(remote_node.http.beacon().get_fork())
.expect("should fetch from http api");
assert_eq!(
node.client
.beacon_chain()
.expect("should have beacon chain")
.head()
.beacon_state
.fork,
fork,
"should match head state"
);
}
#[test]
fn eth2_config() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let eth2_config = env
.runtime()
.block_on(remote_node.http.spec().get_eth2_config())
.expect("should fetch eth2 config from http api");
// TODO: check the entire eth2_config, not just the spec.
assert_eq!(
node.client
.beacon_chain()
.expect("should have beacon chain")
.spec,
eth2_config.spec,
"should match genesis time from head state"
);
}
#[test]
fn get_version() {
let mut env = build_env();
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let version = env
.runtime()
.block_on(remote_node.http.node().get_version())
.expect("should fetch eth2 config from http api");
assert_eq!(version::version(), version, "result should be as expected");
}


@ -1,23 +0,0 @@
[package]
name = "rpc"
version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018"
[dependencies]
bls = { path = "../../eth2/utils/bls" }
beacon_chain = { path = "../beacon_chain" }
network = { path = "../network" }
eth2-libp2p = { path = "../eth2-libp2p" }
version = { path = "../version" }
types = { path = "../../eth2/types" }
eth2_ssz = "0.1.2"
protos = { path = "../../protos" }
grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] }
clap = "2.33.0"
futures = "0.1.29"
serde = "1.0.102"
serde_derive = "1.0.102"
slog = { version = "2.5.2", features = ["max_level_trace"] }
tokio = "0.1.22"
exit-future = "0.1.4"


@ -1,177 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2_libp2p::PubsubMessage;
use eth2_libp2p::Topic;
use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX};
use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage;
use protos::services::{
AttestationData as AttestationDataProto, ProduceAttestationDataRequest,
ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse,
};
use protos::services_grpc::AttestationService;
use slog::{error, info, trace, warn};
use ssz::{ssz_encode, Decode, Encode};
use std::sync::Arc;
use tokio::sync::mpsc;
use types::{Attestation, Slot};
pub struct AttestationServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
pub log: slog::Logger,
}
// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually.
impl<T: BeaconChainTypes> Clone for AttestationServiceInstance<T> {
fn clone(&self) -> Self {
Self {
chain: self.chain.clone(),
network_chan: self.network_chan.clone(),
log: self.log.clone(),
}
}
}
impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
/// Produce the `AttestationData` for signing by a validator.
fn produce_attestation_data(
&mut self,
ctx: RpcContext,
req: ProduceAttestationDataRequest,
sink: UnarySink<ProduceAttestationDataResponse>,
) {
trace!(
&self.log,
"Attempting to produce attestation at slot {}",
req.get_slot()
);
// Then get the AttestationData from the beacon chain
// NOTE(v0.9): shard is incorrectly named, all this should be deleted
let shard = req.get_shard();
let slot_requested = req.get_slot();
let attestation_data = match self
.chain
.produce_attestation_data(Slot::from(slot_requested), shard)
{
Ok(v) => v,
Err(e) => {
// Could not produce an attestation
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce an attestation: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let mut attestation_data_proto = AttestationDataProto::new();
attestation_data_proto.set_ssz(ssz_encode(&attestation_data));
let mut resp = ProduceAttestationDataResponse::new();
resp.set_attestation_data(attestation_data_proto);
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "Failed to reply with success {:?}: {:?}", req, e));
ctx.spawn(f)
}
/// Accept some fully-formed `FreeAttestation` from the validator,
/// store it, and aggregate it into an `Attestation`.
fn publish_attestation(
&mut self,
ctx: RpcContext,
req: PublishAttestationRequest,
sink: UnarySink<PublishAttestationResponse>,
) {
trace!(self.log, "Publishing attestation");
let mut resp = PublishAttestationResponse::new();
let ssz_serialized_attestation = req.get_attestation().get_ssz();
let attestation = match Attestation::from_ssz_bytes(ssz_serialized_attestation) {
Ok(v) => v,
Err(_) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid attestation".to_string()),
))
.map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f);
}
};
match self.chain.process_attestation(attestation.clone()) {
Ok(_) => {
// Attestation was successfully processed.
info!(
self.log,
"Valid attestation from RPC";
"target_epoch" => attestation.data.target.epoch,
"index" => attestation.data.index,
);
// valid attestation, propagate to the network
let topic_string = format!(
"/{}/{}/{}",
TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX
);
let topic = Topic::new(topic_string);
let message = PubsubMessage::Attestation(attestation.as_ssz_bytes());
self.network_chan
.try_send(NetworkMessage::Publish {
topics: vec![topic],
message,
})
.unwrap_or_else(|e| {
error!(
self.log,
"Failed to gossip attestation";
"error" => format!("{:?}", e)
);
});
resp.set_success(true);
}
Err(BeaconChainError::AttestationValidationError(e)) => {
// Attestation was invalid
warn!(
self.log,
"Invalid attestation from RPC";
"error" => format!("{:?}", e),
);
resp.set_success(false);
resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec());
}
Err(e) => {
// Some other error
warn!(
self.log,
"Failed to process attestation from RPC";
"error" => format!("{:?}", e),
);
resp.set_success(false);
resp.set_msg(
format!("There was a beacon chain error: {:?}", e)
.as_bytes()
.to_vec(),
);
}
};
let error_log = self.log.clone();
let f = sink
.success(resp)
.map_err(move |e| error!(error_log, "failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}


@ -1,185 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::{PubsubMessage, Topic};
use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX};
use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage;
use protos::services::{
BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse,
PublishBeaconBlockRequest, PublishBeaconBlockResponse,
};
use protos::services_grpc::BeaconBlockService;
use slog::Logger;
use slog::{error, info, trace, warn};
use ssz::{ssz_encode, Decode, Encode};
use std::sync::Arc;
use tokio::sync::mpsc;
use types::{BeaconBlock, Signature, Slot};
pub struct BeaconBlockServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
pub log: Logger,
}
// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually.
impl<T: BeaconChainTypes> Clone for BeaconBlockServiceInstance<T> {
fn clone(&self) -> Self {
Self {
chain: self.chain.clone(),
network_chan: self.network_chan.clone(),
log: self.log.clone(),
}
}
}
impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
/// Produce a `BeaconBlock` for signing by a validator.
fn produce_beacon_block(
&mut self,
ctx: RpcContext,
req: ProduceBeaconBlockRequest,
sink: UnarySink<ProduceBeaconBlockResponse>,
) {
trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req));
// decode the request
let requested_slot = Slot::from(req.get_slot());
let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) {
Ok(reveal) => reveal,
Err(_) => {
// decode error, incorrect signature
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid randao reveal signature".to_string()),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) {
Ok((block, _state)) => block,
Err(e) => {
// could not produce a block
let log_clone = self.log.clone();
warn!(self.log, "RPC Error"; "Error" => format!("Could not produce a block:{:?}",e));
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::Unknown,
Some(format!("Could not produce a block: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
assert_eq!(
produced_block.slot, requested_slot,
"should produce at the requested slot"
);
let mut block = BeaconBlockProto::new();
block.set_ssz(ssz_encode(&produced_block));
let mut resp = ProduceBeaconBlockResponse::new();
resp.set_block(block);
let f = sink
.success(resp)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
/// Accept some fully-formed `BeaconBlock`, process and publish it.
fn publish_beacon_block(
&mut self,
ctx: RpcContext,
req: PublishBeaconBlockRequest,
sink: UnarySink<PublishBeaconBlockResponse>,
) {
trace!(&self.log, "Attempting to publish a block");
let mut resp = PublishBeaconBlockResponse::new();
let ssz_serialized_block = req.get_block().get_ssz();
match BeaconBlock::from_ssz_bytes(ssz_serialized_block) {
Ok(block) => {
match self.chain.process_block(block.clone()) {
Ok(outcome) => {
if let BlockProcessingOutcome::Processed { block_root } = outcome {
// Block was successfully processed.
info!(
self.log,
"Valid block from RPC";
"root" => format!("{}", block_root),
"slot" => block.slot,
);
// create the network topic to send on
let topic_string = format!(
"/{}/{}/{}",
TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX
);
let topic = Topic::new(topic_string);
let message = PubsubMessage::Block(block.as_ssz_bytes());
// Publish the block to the p2p network via gossipsub.
self.network_chan
.try_send(NetworkMessage::Publish {
topics: vec![topic],
message,
})
.unwrap_or_else(|e| {
error!(
self.log,
"Failed to gossip beacon block";
"error" => format!("{:?}", e)
);
});
resp.set_success(true);
} else {
// Block was not successfully processed.
warn!(
self.log,
"Invalid block from RPC";
"outcome" => format!("{:?}", outcome)
);
resp.set_success(false);
resp.set_msg(
format!("InvalidBlock: {:?}", outcome).as_bytes().to_vec(),
);
}
}
Err(e) => {
// Some failure during processing.
error!(
self.log,
"Failed to process beacon block";
"error" => format!("{:?}", e)
);
resp.set_success(false);
resp.set_msg(format!("failed_to_process: {:?}", e).as_bytes().to_vec());
}
}
resp.set_success(true);
}
Err(_) => {
resp.set_success(false);
resp.set_msg(b"Invalid SSZ".to_vec());
}
};
let f = sink
.success(resp)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}


@ -1,58 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::Future;
use grpcio::{RpcContext, UnarySink};
use protos::services::{Empty, Fork, NodeInfoResponse};
use protos::services_grpc::BeaconNodeService;
use slog::{trace, warn};
use std::sync::Arc;
pub struct BeaconNodeServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
pub log: slog::Logger,
}
// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually.
impl<T: BeaconChainTypes> Clone for BeaconNodeServiceInstance<T> {
fn clone(&self) -> Self {
Self {
chain: self.chain.clone(),
log: self.log.clone(),
}
}
}
impl<T: BeaconChainTypes> BeaconNodeService for BeaconNodeServiceInstance<T> {
/// Provides basic node information.
fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink<NodeInfoResponse>) {
trace!(self.log, "Node info requested via RPC");
// build the response
let mut node_info = NodeInfoResponse::new();
node_info.set_version(version::version());
// get the chain state
let state = &self.chain.head().beacon_state;
let state_fork = state.fork.clone();
let genesis_time = state.genesis_time;
// build the rpc fork struct
let mut fork = Fork::new();
fork.set_previous_version(state_fork.previous_version.to_vec());
fork.set_current_version(state_fork.current_version.to_vec());
fork.set_epoch(state_fork.epoch.into());
let spec = &self.chain.spec;
node_info.set_fork(fork);
node_info.set_genesis_time(genesis_time);
node_info.set_genesis_slot(spec.genesis_slot.as_u64());
node_info.set_network_id(u32::from(spec.network_id));
// send the node_info the requester
let error_log = self.log.clone();
let f = sink
.success(node_info)
.map_err(move |e| warn!(error_log, "failed to reply {:?}", e));
ctx.spawn(f)
}
}


@ -1,44 +0,0 @@
use clap::ArgMatches;
use serde_derive::{Deserialize, Serialize};
use std::net::Ipv4Addr;
/// RPC Configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// Enable the RPC server.
pub enabled: bool,
/// The IPv4 address the RPC will listen on.
pub listen_address: Ipv4Addr,
/// The port the RPC will listen on.
pub port: u16,
}
impl Default for Config {
fn default() -> Self {
Config {
enabled: true,
listen_address: Ipv4Addr::new(127, 0, 0, 1),
port: 5051,
}
}
}
impl Config {
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if args.is_present("no-grpc") {
self.enabled = false;
}
if let Some(rpc_address) = args.value_of("rpc-address") {
self.listen_address = rpc_address
.parse::<Ipv4Addr>()
.map_err(|_| "rpc-address is not IPv4 address")?;
}
if let Some(rpc_port) = args.value_of("rpc-port") {
self.port = rpc_port.parse::<u16>().map_err(|_| "rpc-port is not u16")?;
}
Ok(())
}
}


@ -1,101 +0,0 @@
mod attestation;
mod beacon_block;
mod beacon_node;
pub mod config;
mod validator;
use self::attestation::AttestationServiceInstance;
use self::beacon_block::BeaconBlockServiceInstance;
use self::beacon_node::BeaconNodeServiceInstance;
use self::validator::ValidatorServiceInstance;
use beacon_chain::{BeaconChain, BeaconChainTypes};
pub use config::Config;
use futures::Future;
use grpcio::{Environment, ServerBuilder};
use network::NetworkMessage;
use protos::services_grpc::{
create_attestation_service, create_beacon_block_service, create_beacon_node_service,
create_validator_service,
};
use slog::{info, warn};
use std::sync::Arc;
use tokio::runtime::TaskExecutor;
use tokio::sync::mpsc;
pub fn start_server<T: BeaconChainTypes>(
config: &Config,
executor: &TaskExecutor,
network_chan: mpsc::UnboundedSender<NetworkMessage>,
beacon_chain: Arc<BeaconChain<T>>,
log: slog::Logger,
) -> exit_future::Signal {
let env = Arc::new(Environment::new(1));
// build a channel to kill the rpc server
let (rpc_exit_signal, rpc_exit) = exit_future::signal();
// build the individual rpc services
let beacon_node_service = {
let instance = BeaconNodeServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_beacon_node_service(instance)
};
let beacon_block_service = {
let instance = BeaconBlockServiceInstance {
chain: beacon_chain.clone(),
network_chan: network_chan.clone(),
log: log.clone(),
};
create_beacon_block_service(instance)
};
let validator_service = {
let instance = ValidatorServiceInstance {
chain: beacon_chain.clone(),
log: log.clone(),
};
create_validator_service(instance)
};
let attestation_service = {
let instance = AttestationServiceInstance {
network_chan,
chain: beacon_chain.clone(),
log: log.clone(),
};
create_attestation_service(instance)
};
let mut server = ServerBuilder::new(env)
.register_service(beacon_block_service)
.register_service(validator_service)
.register_service(beacon_node_service)
.register_service(attestation_service)
.bind(config.listen_address.to_string(), config.port)
.build()
.unwrap();
let spawn_rpc = {
server.start();
for &(ref host, port) in server.bind_addrs() {
info!(
log,
"gRPC API started";
"port" => port,
"host" => host,
);
}
rpc_exit.and_then(move |_| {
info!(log, "RPC Server shutting down");
server
.shutdown()
.wait()
.map(|_| ())
.map_err(|e| warn!(log, "RPC server failed to shutdown: {:?}", e))?;
Ok(())
})
};
executor.spawn(spawn_rpc);
rpc_exit_signal
}

View File

@ -1,185 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bls::PublicKey;
use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use protos::services::{ActiveValidator, GetDutiesRequest, GetDutiesResponse, ValidatorDuty};
use protos::services_grpc::ValidatorService;
use slog::{trace, warn};
use ssz::Decode;
use std::sync::Arc;
use types::{Epoch, EthSpec, RelativeEpoch};
pub struct ValidatorServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
pub log: slog::Logger,
}
// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually.
impl<T: BeaconChainTypes> Clone for ValidatorServiceInstance<T> {
fn clone(&self) -> Self {
Self {
chain: self.chain.clone(),
log: self.log.clone(),
}
}
}
impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> {
/// For a list of validator public keys, this function returns the slot at which each
/// validator must propose a block, the slot and committee in which they must attest, and the
/// shard they must attest to.
fn get_validator_duties(
&mut self,
ctx: RpcContext,
req: GetDutiesRequest,
sink: UnarySink<GetDutiesResponse>,
) {
trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch());
let validators = req.get_validators();
let epoch = Epoch::from(req.get_epoch());
let slot = epoch.start_slot(T::EthSpec::slots_per_epoch());
let mut state = if let Ok(state) = self.chain.state_at_slot(slot) {
state.clone()
} else {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some("No state".to_string()),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
};
let _ = state.build_all_caches(&self.chain.spec);
assert_eq!(
state.current_epoch(),
epoch,
"Retrieved state should be from the same epoch"
);
let mut resp = GetDutiesResponse::new();
let resp_validators = resp.mut_active_validators();
let validator_proposers: Result<Vec<usize>, _> = epoch
.slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| state.get_beacon_proposer_index(slot, &self.chain.spec))
.collect();
let validator_proposers = match validator_proposers {
Ok(v) => v,
Err(e) => {
// could not get the validator proposer index
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Could not find beacon proposers: {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "failed to reply {:?} : {:?}", req, e));
return ctx.spawn(f);
}
};
// get the duties for each validator
for validator_pk in validators.get_public_keys() {
let mut active_validator = ActiveValidator::new();
let public_key = match PublicKey::from_ssz_bytes(validator_pk) {
Ok(v) => v,
Err(_) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some("Invalid public_key".to_string()),
))
.map_err(move |_| warn!(log_clone, "failed to reply {:?}", req));
return ctx.spawn(f);
}
};
// get the validator index
let val_index = match state.get_validator_index(&public_key) {
Ok(Some(index)) => index,
Ok(None) => {
// index not present in registry, set the duties for this key to None
warn!(
self.log,
"RPC requested a public key that is not in the registry: {:?}", public_key
);
active_validator.set_none(false);
resp_validators.push(active_validator);
continue;
}
// the cache is not built, throw an error
Err(e) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Beacon state error {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "Failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// get attestation duties and check if validator is active
let attestation_duties = match state
.get_attestation_duties(val_index, RelativeEpoch::Current)
{
Ok(Some(v)) => v,
Ok(_) => {
// validator is inactive, go to the next validator
warn!(
self.log,
"RPC requested an inactive validator key: {:?}", public_key
);
active_validator.set_none(false);
resp_validators.push(active_validator);
continue;
}
// the cache is not built, throw an error
Err(e) => {
let log_clone = self.log.clone();
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::FailedPrecondition,
Some(format!("Beacon state error {:?}", e)),
))
.map_err(move |e| warn!(log_clone, "Failed to reply {:?}: {:?}", req, e));
return ctx.spawn(f);
}
};
// we have an active validator, set its duties
let mut duty = ValidatorDuty::new();
// check if the validator needs to propose a block
if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) {
duty.set_block_production_slot(
epoch.start_slot(T::EthSpec::slots_per_epoch()).as_u64() + slot as u64,
);
} else {
// no blocks to propose this epoch
duty.set_none(false)
}
duty.set_committee_index(attestation_duties.committee_position as u64);
duty.set_attestation_slot(attestation_duties.slot.as_u64());
duty.set_attestation_shard(attestation_duties.index);
duty.set_committee_len(attestation_duties.committee_len as u64);
active_validator.set_duty(duty);
resp_validators.push(active_validator);
}
let f = sink
.success(resp)
.map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
ctx.spawn(f)
}
}

View File

@ -1,22 +1,14 @@
use clap::{App, Arg, SubCommand};
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
App::new("Beacon Node")
.visible_aliases(&["b", "bn", "beacon", "beacon_node"])
App::new("beacon_node")
.visible_aliases(&["b", "bn", "beacon"])
.version(crate_version!())
.author("Sigma Prime <contact@sigmaprime.io>")
.about("Eth 2.0 Client")
/*
* Configuration directory locations.
*/
.arg(
Arg::with_name("datadir")
.long("datadir")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true)
.global(true)
)
.arg(
Arg::with_name("network-dir")
.long("network-dir")
@ -103,30 +95,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("A secp256k1 secret key, represented as ASCII-encoded hex bytes (with or without 0x prefix).")
.takes_value(true),
)
/*
* gRPC parameters.
*/
.arg(
Arg::with_name("no-grpc")
.long("no-grpc")
.help("Disable the gRPC server.")
.takes_value(false),
)
.arg(
Arg::with_name("rpc-address")
.long("rpc-address")
.value_name("ADDRESS")
.help("Listen address for RPC endpoint.")
.takes_value(true),
)
.arg(
Arg::with_name("rpc-port")
.long("rpc-port")
.value_name("PORT")
.help("Listen port for RPC endpoint.")
.conflicts_with("port-bump")
.takes_value(true),
)
/* REST API related arguments */
.arg(
Arg::with_name("no-api")
@ -299,7 +267,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.short("m")
.value_name("MINUTES")
.required(true)
.default_value("15")
.default_value("0")
.help("The maximum number of minutes that will have elapsed before genesis"))
)
/*

View File

@ -587,7 +587,6 @@ impl ConfigBuilder {
.map_err(|e| format!("Unable to parse default listen address: {:?}", e))?;
self.client_config.network.listen_address = addr.into();
self.client_config.rpc.listen_address = addr;
self.client_config.rest_api.listen_address = addr;
Ok(())
@ -607,7 +606,6 @@ impl ConfigBuilder {
self.client_config.network.libp2p_port += bump;
self.client_config.network.discovery_port += bump;
self.client_config.rpc.port += bump;
self.client_config.rest_api.port += bump;
self.client_config.websocket_server.port += bump;
}

View File

@ -125,7 +125,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
.build_beacon_chain()?
.libp2p_network(&client_config.network)?
.http_server(&client_config, &http_eth2_config)?
.grpc_server(&client_config.rpc)?
.peer_count_notifier()?
.slot_notifier()?;

View File

@ -1,6 +1,6 @@
#![cfg(test)]
use node_test_rig::{environment::EnvironmentBuilder, LocalBeaconNode};
use node_test_rig::{environment::EnvironmentBuilder, testing_client_config, LocalBeaconNode};
use types::{MinimalEthSpec, Slot};
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
@ -17,12 +17,12 @@ fn http_server_genesis_state() {
.build()
.expect("environment should build");
let node = LocalBeaconNode::production(env.core_context());
let node = LocalBeaconNode::production(env.core_context(), testing_client_config());
let remote_node = node.remote_node().expect("should produce remote node");
let (api_state, _root) = env
.runtime()
.block_on(remote_node.http.beacon().state_at_slot(Slot::new(0)))
.block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0)))
.expect("should fetch state from http api");
let mut db_state = node

View File

@ -1,6 +1,8 @@
# Summary
* [Introduction](./intro.md)
* [Installation](./installation.md)
* [Docker](./docker.md)
* [CLI](./cli.md)
* [Testnets](./testnets.md)
* [Simple Local Testnet](./simple-testnet.md)
@ -9,4 +11,3 @@
* [WebSocket](./websockets.md)
* [Contributing](./contributing.md)
* [Development Environment](./setup.md)
* [CI & Testing](./ci.md)

View File

@ -1,33 +0,0 @@
# Continuous Integration (CI) and Testing
Lighthouse uses a self-hosted Gitlab CI server to run tests and deploy docs.
For security reasons, **CI will only be run automatically for Lighthouse
maintainers.** Contributors without maintainer privileges will need to have CI
triggered for them prior to a PR being merged.
You can see the full set of tests we run in the
[gitlab-ci.yml](https://github.com/sigp/lighthouse/blob/master/.gitlab-ci.yml)
file. The following two commands should complete successfully before CI can
pass:
```bash
$ cargo test --all --all-features
$ cargo fmt --all --check
```
_Note: Travis CI is also used, however it does not run the full test suite._
### Ethereum 2.0 Spec Tests
The
[ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/)
repository contains a large set of tests that verify Lighthouse behaviour
against the Ethereum Foundation specifications.
These tests are quite large (100's of MB), so we don't download them by
default. Developers should ensure they have downloaded these tests using the
`Makefile` in
[tests/ef_tests](https://github.com/sigp/lighthouse/tree/master/tests/ef_tests).
**Failures in these tests should prevent CI from passing.**

19
book/src/docker.md Normal file
View File

@ -0,0 +1,19 @@
# Docker Guide
This repository has a `Dockerfile` in the root which builds an image with the
`lighthouse` binary installed.
To use the image, first build it (this will likely take several minutes):
```bash
$ docker build . -t lighthouse
```
Once it's built, run it with:
```bash
$ docker run lighthouse lighthouse --help
```
_Note: the first `lighthouse` is the name of the tag we created earlier. The
second `lighthouse` refers to the binary installed in the image._

48
book/src/installation.md Normal file
View File

@ -0,0 +1,48 @@
# 📦 Installation
Lighthouse runs on Linux, MacOS and Windows. Installation should be easy. In
fact, if you already have Rust installed all you need is:
- `$ git clone https://github.com/sigp/lighthouse.git`
- `$ cd lighthouse`
- `$ make`
If this doesn't work or is not clear enough, see the [Detailed Instructions](#detailed-instructions). If you have further issues, see [Troubleshooting](#troubleshooting). If you'd prefer to use Docker, see the [Docker Guide](./docker.md).
## Detailed Instructions
1. Install Rust and Cargo with [rustup](https://rustup.rs/).
- Use the `stable` toolchain (it's the default).
1. Clone the Lighthouse repository.
- Run `$ git clone https://github.com/sigp/lighthouse.git`
- Change into the newly created directory with `$ cd lighthouse`
1. Build Lighthouse with `$ make`.
1. Installation was successful if `$ lighthouse --help` displays the
command-line documentation.
> First time compilation may take several minutes. If you experience any
> failures, please reach out on [discord](https://discord.gg/cyAszAh) or
> [create an issue](https://github.com/sigp/lighthouse/issues/new).
## Troubleshooting
### Command is not found
Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory
needs to be on your `PATH` before you can run `$ lighthouse`.
See ["Configuring the `PATH` environment variable"
(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information.
### OpenSSL
If you get a build failure relating to OpenSSL, try installing `openssl-dev` or
`libssl-dev` using your OS package manager.
- Ubuntu: `$ apt-get install libssl-dev`.
- Amazon Linux: `$ yum install openssl-devel`.
### Perl for Windows
Perl may also be required to build Lighthouse. You can install [Strawberry
Perl](http://strawberryperl.com/), or alternatively if you're using the [Chocolatey](https://chocolatey.org/) package manager for Windows, use the following choco install command: `choco install strawberryperl`.

View File

@ -19,7 +19,8 @@ We implement the specification as defined in the
You may read this book from start to finish, or jump to some of these topics:
- Get started with [development environment setup](./setup.md).
- Follow the [Installation Guide](./installation.md) to install Lighthouse.
- Get hacking with the [Development Environment Guide](./setup.md).
- Utilize the whole stack by starting a [simple local testnet](./simple-testnet.md).
- Query the [RESTful HTTP API](./http.md) using `curl`.
- Listen to events with the [JSON WebSocket API](./websockets.md).

View File

@ -1,41 +1,49 @@
# Development Environment Setup
# Development Environment
## Linux, MacOS & Windows
Most Lighthouse developers work on Linux or MacOS, however Windows should still
be suitable.
1. Install Rust and Cargo with [rustup](https://rustup.rs/).
- Use the `stable` toolchain (it's the default).
1. Install build dependencies using your package manager.
- `clang`, `protobuf`, `libssl-dev`, `cmake`
1. Clone the [github.com/sigp/lighthouse](https://github.com/sigp/lighthouse)
repository.
1. Run `$ make` to build Lighthouse.
1. Run `$ make test` to run the test suite
- If you experience any failures, please reach out on
[discord](https://discord.gg/cyAszAh).
- Developers use `$ make test-full` to ensure you have the full set of
test vectors.
First, follow the [`Installation Guide`](./installation.md) to install
Lighthouse. This will install Lighthouse to your `PATH`, which is not
particularly useful for development but still a good way to ensure you have the
base dependencies.
> - The `beacon_node`, `validator_client` and other binaries are created in
> the `target/release` directory.
> - First-time compilation may take several minutes.
The only additional requirement for developers is
[`ganache-cli`](https://github.com/trufflesuite/ganache-cli). This is used to
simulate the Eth1 chain during tests. You'll get failures during tests if you
don't have `ganache-cli` available on your `PATH`.
### Installing to `PATH`
## Testing
Use `cargo install --path lighthouse` from the root of the repository to
install the compiled binary to `CARGO_HOME` or `$HOME/.cargo`. If this
directory is on your `PATH`, you can run `$ lighthouse ..` from anywhere.
As with most other Rust projects, Lighthouse uses `cargo test` for unit and
integration tests. For example, to test the `ssz` crate run:
See ["Configuring the `PATH` environment
variable" (rust-lang.org)](https://www.rust-lang.org/tools/install) for more information.
```bash
cd eth2/utils/ssz
cargo test
```
> If you _don't_ install `lighthouse` to the path, you'll need to run the
> binaries directly from the `target` directory or using `cargo run ...`.
We also wrap some of these commands and expose them via the `Makefile` in the
project root for the benefit of CI/CD. We list some of these commands below so
you can run them locally and avoid CI failures:
### Windows
- `$ make cargo-fmt`: (fast) runs a Rust code linter.
- `$ make test`: (medium) runs unit tests across the whole project.
- `$ make test-ef`: (medium) runs the Ethereum Foundation test vectors.
- `$ make test-full`: (slow) runs the full test suite (including all previous
commands). This is approximately everything
that is required to pass CI.
Perl may also be required to build Lighthouse. You can install [Strawberry
Perl](http://strawberryperl.com/), or alternatively if you're using the [Chocolatey](https://chocolatey.org/) package manager for Windows, use the following choco install command: `choco install strawberryperl`.
_The Lighthouse test suite is quite extensive; running the whole suite may take 30+ minutes._
Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues
compiling in Windows. You can specify a known working version by editing
version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`.
### Ethereum 2.0 Spec Tests
The
[ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/)
repository contains a large set of tests that verify Lighthouse behaviour
against the Ethereum Foundation specifications.
These tests are quite large (100's of MB) so they're only downloaded if you run
`$ make test-ef` (or anything that runs it). You may want to avoid
downloading these tests if you're on a slow or metered Internet connection. CI
will require them to pass, though.

View File

@ -1,4 +1,7 @@
use super::{AggregateSignature, AttestationData, BitList, EthSpec};
use super::{
AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey,
Signature,
};
use crate::test_utils::TestRandom;
use serde_derive::{Deserialize, Serialize};
@ -7,6 +10,12 @@ use test_random_derive::TestRandom;
use tree_hash::TreeHash;
use tree_hash_derive::{SignedRoot, TreeHash};
#[derive(Debug, PartialEq)]
pub enum Error {
SszTypesError(ssz_types::Error),
AlreadySigned(usize),
}
/// Details an attestation that can be slashable.
///
/// Spec v0.9.1
@ -48,6 +57,37 @@ impl<T: EthSpec> Attestation<T> {
self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits);
self.signature.add_aggregate(&other.signature);
}
/// Signs `self`, setting the `committee_position`'th bit of `aggregation_bits` to `true`.
///
/// Returns an `AlreadySigned` error if the `committee_position`'th bit is already `true`.
pub fn sign(
&mut self,
secret_key: &SecretKey,
committee_position: usize,
fork: &Fork,
spec: &ChainSpec,
) -> Result<(), Error> {
if self
.aggregation_bits
.get(committee_position)
.map_err(|e| Error::SszTypesError(e))?
{
Err(Error::AlreadySigned(committee_position))
} else {
self.aggregation_bits
.set(committee_position, true)
.map_err(|e| Error::SszTypesError(e))?;
let message = self.data.tree_hash_root();
let domain = spec.get_domain(self.data.target.epoch, Domain::BeaconAttester, fork);
self.signature
.add(&Signature::new(&message, domain, secret_key));
Ok(())
}
}
}
#[cfg(test)]
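As a quick illustration of the new signing helper, the sketch below wraps `Attestation::sign` in a hypothetical function; the unsigned `attestation`, the validator's `SecretKey`, its `committee_position`, and the current `Fork`/`ChainSpec` are all assumed to be available from elsewhere and are not defined in this diff.

```rust
use types::{Attestation, AttestationError, ChainSpec, Fork, MinimalEthSpec, SecretKey};

// Hypothetical wrapper, purely for illustration. `sign` sets the
// `committee_position`'th aggregation bit and aggregates the signature,
// returning `AttestationError::AlreadySigned` if that bit was already set.
fn sign_attestation(
    attestation: &mut Attestation<MinimalEthSpec>,
    secret_key: &SecretKey,
    committee_position: usize,
    fork: &Fork,
    spec: &ChainSpec,
) -> Result<(), AttestationError> {
    attestation.sign(secret_key, committee_position, fork, spec)
}
```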

View File

@ -100,6 +100,13 @@ impl<T: EthSpec> BeaconBlock<T> {
..self.block_header()
}
}
/// Signs `self`.
pub fn sign(&mut self, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec) {
let message = self.signed_root();
let domain = spec.get_domain(self.epoch(), Domain::BeaconProposer, &fork);
self.signature = Signature::new(&message, domain, &secret_key);
}
}
#[cfg(test)]
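The block-signing counterpart can be sketched the same way; again the `block`, `secret_key`, `fork` and `spec` values are assumptions for illustration.

```rust
use types::{BeaconBlock, ChainSpec, Fork, MinimalEthSpec, SecretKey};

// Hypothetical wrapper for illustration: `sign` hashes the block's signed root,
// derives the `BeaconProposer` domain for the block's epoch and sets `block.signature`.
fn sign_block(
    block: &mut BeaconBlock<MinimalEthSpec>,
    secret_key: &SecretKey,
    fork: &Fork,
    spec: &ChainSpec,
) {
    block.sign(secret_key, fork, spec)
}
```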

View File

@ -39,7 +39,7 @@ pub mod validator;
use ethereum_types::{H160, H256};
pub use crate::attestation::Attestation;
pub use crate::attestation::{Attestation, Error as AttestationError};
pub use crate::attestation_data::AttestationData;
pub use crate::attestation_duty::AttestationDuty;
pub use crate::attester_slashing::AttesterSlashing;

View File

@ -0,0 +1 @@
contract/

View File

@ -0,0 +1,17 @@
[package]
name = "deposit_contract"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
build = "build.rs"
[build-dependencies]
reqwest = "0.9.20"
serde_json = "1.0"
[dependencies]
types = { path = "../../types"}
eth2_ssz = { path = "../ssz"}
tree_hash = { path = "../tree_hash"}
ethabi = "9.0"

View File

@ -0,0 +1,56 @@
use ethabi::{Contract, Token};
use ssz::Encode;
use types::DepositData;
pub use ethabi::Error;
pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000;
pub const DEPOSIT_GAS: usize = 4_000_000;
pub const ABI: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.json");
pub const BYTECODE: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.bytecode");
pub fn eth1_tx_data(deposit_data: &DepositData) -> Result<Vec<u8>, Error> {
let params = vec![
Token::Bytes(deposit_data.pubkey.as_ssz_bytes()),
Token::Bytes(deposit_data.withdrawal_credentials.as_ssz_bytes()),
Token::Bytes(deposit_data.signature.as_ssz_bytes()),
];
let abi = Contract::load(ABI)?;
let function = abi.function("deposit")?;
function.encode_input(&params)
}
#[cfg(test)]
mod tests {
use super::*;
use types::{
test_utils::generate_deterministic_keypair, ChainSpec, EthSpec, Hash256, Keypair,
MinimalEthSpec, Signature,
};
type E = MinimalEthSpec;
fn get_deposit(keypair: Keypair, spec: &ChainSpec) -> DepositData {
let mut deposit_data = DepositData {
pubkey: keypair.pk.into(),
withdrawal_credentials: Hash256::from_slice(&[42; 32]),
amount: u64::max_value(),
signature: Signature::empty_signature().into(),
};
deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec);
deposit_data
}
#[test]
fn basic() {
let spec = &E::default_spec();
let keypair = generate_deterministic_keypair(42);
let deposit = get_deposit(keypair.clone(), spec);
let data = eth1_tx_data(&deposit).expect("should produce tx data");
assert_eq!(data.len(), 388, "bytes should be correct length");
}
}
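A hedged sketch of how a caller might consume `eth1_tx_data`: build the ABI-encoded `deposit(...)` calldata and hand it to whatever eth1 transaction plumbing is in use. The `DepositData` is assumed to have been created and signed elsewhere (as in the test above); the transaction-sending side is omitted.

```rust
use deposit_contract::{eth1_tx_data, Error, DEPOSIT_GAS};
use types::DepositData;

// Returns the calldata for the deposit contract's `deposit` function together with
// a suggested gas budget. The caller would send the bytes as the transaction `data`
// and `deposit_data.amount` gwei as the transaction value.
fn deposit_calldata(deposit_data: &DepositData) -> Result<(Vec<u8>, usize), Error> {
    let data = eth1_tx_data(deposit_data)?;
    Ok((data, DEPOSIT_GAS))
}
```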

View File

@ -55,7 +55,7 @@
use prometheus::{HistogramOpts, HistogramTimer, Opts};
pub use prometheus::{Histogram, IntCounter, IntGauge, Result};
pub use prometheus::{Encoder, Histogram, IntCounter, IntGauge, Result, TextEncoder};
/// Collect all the metrics for reporting.
pub fn gather() -> Vec<prometheus::proto::MetricFamily> {

View File

@ -12,3 +12,8 @@ url = "1.2"
serde = "1.0"
futures = "0.1.25"
types = { path = "../../../eth2/types" }
rest_api = { path = "../../../beacon_node/rest_api" }
hex = "0.3"
eth2_ssz = { path = "../../../eth2/utils/ssz" }
serde_json = "^1.0"
eth2_config = { path = "../../../eth2/utils/eth2_config" }

View File

@ -3,24 +3,46 @@
//!
//! Presently, this is only used for testing but it _could_ become a user-facing library.
use futures::{Future, IntoFuture};
use reqwest::r#async::{Client, RequestBuilder};
use serde::Deserialize;
use eth2_config::Eth2Config;
use futures::{future, Future, IntoFuture};
use reqwest::{
r#async::{Client, ClientBuilder, Response},
StatusCode,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use ssz::Encode;
use std::marker::PhantomData;
use std::net::SocketAddr;
use types::{BeaconBlock, BeaconState, EthSpec};
use types::{Hash256, Slot};
use std::time::Duration;
use types::{
Attestation, BeaconBlock, BeaconState, CommitteeIndex, Epoch, EthSpec, Fork, Hash256,
PublicKey, Signature, Slot,
};
use url::Url;
pub use rest_api::{BulkValidatorDutiesRequest, HeadResponse, ValidatorDuty};
// Setting a long timeout for debug ensures that crypto-heavy operations can still succeed.
#[cfg(debug_assertions)]
pub const REQUEST_TIMEOUT_SECONDS: u64 = 15;
#[cfg(not(debug_assertions))]
pub const REQUEST_TIMEOUT_SECONDS: u64 = 5;
#[derive(Clone)]
/// Connects to a remote Lighthouse (or compatible) node via HTTP.
pub struct RemoteBeaconNode<E: EthSpec> {
pub http: HttpClient<E>,
}
impl<E: EthSpec> RemoteBeaconNode<E> {
pub fn new(http_endpoint: SocketAddr) -> Result<Self, String> {
/// Uses the default HTTP timeout.
pub fn new(http_endpoint: String) -> Result<Self, String> {
Self::new_with_timeout(http_endpoint, Duration::from_secs(REQUEST_TIMEOUT_SECONDS))
}
pub fn new_with_timeout(http_endpoint: String, timeout: Duration) -> Result<Self, String> {
Ok(Self {
http: HttpClient::new(format!("http://{}", http_endpoint.to_string()))
http: HttpClient::new(http_endpoint, timeout)
.map_err(|e| format!("Unable to create http client: {:?}", e))?,
})
}
@ -28,23 +50,34 @@ impl<E: EthSpec> RemoteBeaconNode<E> {
#[derive(Debug)]
pub enum Error {
/// Unable to parse a URL. Check the server URL.
UrlParseError(url::ParseError),
/// The `reqwest` library returned an error.
ReqwestError(reqwest::Error),
/// There was an error when encoding/decoding an object using serde.
SerdeJsonError(serde_json::Error),
/// The server responded to the request, however it did not return a 200-type success code.
DidNotSucceed { status: StatusCode, body: String },
}
#[derive(Clone)]
pub struct HttpClient<E> {
client: Client,
url: Url,
timeout: Duration,
_phantom: PhantomData<E>,
}
impl<E: EthSpec> HttpClient<E> {
/// Creates a new instance (without connecting to the node).
pub fn new(server_url: String) -> Result<Self, Error> {
pub fn new(server_url: String, timeout: Duration) -> Result<Self, Error> {
Ok(Self {
client: Client::new(),
client: ClientBuilder::new()
.timeout(timeout)
.build()
.expect("should build from static configuration"),
url: Url::parse(&server_url)?,
timeout: Duration::from_secs(15),
_phantom: PhantomData,
})
}
@ -53,13 +86,231 @@ impl<E: EthSpec> HttpClient<E> {
Beacon(self.clone())
}
pub fn validator(&self) -> Validator<E> {
Validator(self.clone())
}
pub fn spec(&self) -> Spec<E> {
Spec(self.clone())
}
pub fn node(&self) -> Node<E> {
Node(self.clone())
}
fn url(&self, path: &str) -> Result<Url, Error> {
self.url.join(path).map_err(|e| e.into())
}
pub fn get(&self, path: &str) -> Result<RequestBuilder, Error> {
self.url(path)
.map(|url| Client::new().get(&url.to_string()))
pub fn json_post<T: Serialize>(
&self,
url: Url,
body: T,
) -> impl Future<Item = Response, Error = Error> {
self.client
.post(&url.to_string())
.json(&body)
.send()
.map_err(Error::from)
}
pub fn json_get<T: DeserializeOwned>(
&self,
mut url: Url,
query_pairs: Vec<(String, String)>,
) -> impl Future<Item = T, Error = Error> {
query_pairs.into_iter().for_each(|(key, param)| {
url.query_pairs_mut().append_pair(&key, &param);
});
self.client
.get(&url.to_string())
.send()
.map_err(Error::from)
.and_then(|response| error_for_status(response).map_err(Error::from))
.and_then(|mut success| success.json::<T>().map_err(Error::from))
}
}
/// Returns an `Error` (with a description) if the `response` was not a 200-type success response.
///
/// Distinct from `Response::error_for_status` because it includes the body of the response as
/// text. This ensures the error message from the server is not discarded.
fn error_for_status(
mut response: Response,
) -> Box<dyn Future<Item = Response, Error = Error> + Send> {
let status = response.status();
if status.is_success() {
Box::new(future::ok(response))
} else {
Box::new(response.text().then(move |text_result| match text_result {
Err(e) => Err(Error::ReqwestError(e)),
Ok(body) => Err(Error::DidNotSucceed { status, body }),
}))
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishStatus {
/// The object was valid and has been published to the network.
Valid,
/// The object was not valid and may or may not have been published to the network.
Invalid(String),
/// The server responded with an unknown status code. The object may or may not have been
/// published to the network.
Unknown,
}
impl PublishStatus {
/// Returns `true` if `*self == PublishStatus::Valid`.
pub fn is_valid(&self) -> bool {
*self == PublishStatus::Valid
}
}
/// Provides the functions on the `/validator` endpoint of the node.
#[derive(Clone)]
pub struct Validator<E>(HttpClient<E>);
impl<E: EthSpec> Validator<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("validator/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
/// Produces an unsigned attestation.
pub fn produce_attestation(
&self,
slot: Slot,
committee_index: CommitteeIndex,
) -> impl Future<Item = Attestation<E>, Error = Error> {
let query_params = vec![
("slot".into(), format!("{}", slot)),
("committee_index".into(), format!("{}", committee_index)),
];
let client = self.0.clone();
self.url("attestation")
.into_future()
.and_then(move |url| client.json_get(url, query_params))
}
/// Posts an attestation to the beacon node, expecting it to verify it and publish it to the network.
pub fn publish_attestation(
&self,
attestation: Attestation<E>,
) -> impl Future<Item = PublishStatus, Error = Error> {
let client = self.0.clone();
self.url("attestation")
.into_future()
.and_then(move |url| client.json_post::<_>(url, attestation))
.and_then(|mut response| {
response
.text()
.map(|text| (response, text))
.map_err(Error::from)
})
.and_then(|(response, text)| match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
})
}
/// Returns the duties required of the given validator pubkeys in the given epoch.
///
/// ## Warning
///
/// This method cannot request large amounts of validator duties because the query string fills
/// up the URL. I have seen requests of 1,024 fail. For large requests, use `get_duties_bulk`.
pub fn get_duties(
&self,
epoch: Epoch,
validator_pubkeys: &[PublicKey],
) -> impl Future<Item = Vec<ValidatorDuty>, Error = Error> {
let validator_pubkeys: Vec<String> =
validator_pubkeys.iter().map(pubkey_as_string).collect();
let client = self.0.clone();
self.url("duties").into_future().and_then(move |url| {
let mut query_params = validator_pubkeys
.into_iter()
.map(|pubkey| ("validator_pubkeys".to_string(), pubkey))
.collect::<Vec<_>>();
query_params.push(("epoch".into(), format!("{}", epoch.as_u64())));
client.json_get::<_>(url, query_params)
})
}
/// Returns the duties required of the given validator pubkeys in the given epoch.
pub fn get_duties_bulk(
&self,
epoch: Epoch,
validator_pubkeys: &[PublicKey],
) -> impl Future<Item = Vec<ValidatorDuty>, Error = Error> {
let client = self.0.clone();
let bulk_request = BulkValidatorDutiesRequest {
epoch,
pubkeys: validator_pubkeys.to_vec(),
};
self.url("duties")
.into_future()
.and_then(move |url| client.json_post::<_>(url, bulk_request))
.and_then(|response| error_for_status(response).map_err(Error::from))
.and_then(|mut success| success.json().map_err(Error::from))
}
/// Posts a block to the beacon node, expecting it to verify it and publish it to the network.
pub fn publish_block(
&self,
block: BeaconBlock<E>,
) -> impl Future<Item = PublishStatus, Error = Error> {
let client = self.0.clone();
self.url("block")
.into_future()
.and_then(move |url| client.json_post::<_>(url, block))
.and_then(|mut response| {
response
.text()
.map(|text| (response, text))
.map_err(Error::from)
})
.and_then(|(response, text)| match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
})
}
/// Requests a new (unsigned) block from the beacon node.
pub fn produce_block(
&self,
slot: Slot,
randao_reveal: Signature,
) -> impl Future<Item = BeaconBlock<E>, Error = Error> {
let client = self.0.clone();
self.url("block").into_future().and_then(move |url| {
client.json_get::<BeaconBlock<E>>(
url,
vec![
("slot".into(), format!("{}", slot.as_u64())),
("randao_reveal".into(), signature_as_string(&randao_reveal)),
],
)
})
}
}
@ -75,45 +326,130 @@ impl<E: EthSpec> Beacon<E> {
.map_err(Into::into)
}
pub fn get_genesis_time(&self) -> impl Future<Item = u64, Error = Error> {
let client = self.0.clone();
self.url("genesis_time")
.into_future()
.and_then(move |url| client.json_get(url, vec![]))
}
pub fn get_fork(&self) -> impl Future<Item = Fork, Error = Error> {
let client = self.0.clone();
self.url("fork")
.into_future()
.and_then(move |url| client.json_get(url, vec![]))
}
pub fn get_head(&self) -> impl Future<Item = HeadResponse, Error = Error> {
let client = self.0.clone();
self.url("head")
.into_future()
.and_then(move |url| client.json_get::<HeadResponse>(url, vec![]))
}
/// Returns the block and block root at the given slot.
pub fn block_at_slot(
pub fn get_block_by_slot(
&self,
slot: Slot,
) -> impl Future<Item = (BeaconBlock<E>, Hash256), Error = Error> {
self.get_block("slot".to_string(), format!("{}", slot.as_u64()))
}
/// Returns the block and block root at the given root.
pub fn get_block_by_root(
&self,
root: Hash256,
) -> impl Future<Item = (BeaconBlock<E>, Hash256), Error = Error> {
self.get_block("root".to_string(), root_as_string(root))
}
/// Returns the block and block root at the given slot.
fn get_block(
&self,
query_key: String,
query_param: String,
) -> impl Future<Item = (BeaconBlock<E>, Hash256), Error = Error> {
let client = self.0.clone();
self.url("block")
.into_future()
.and_then(move |mut url| {
url.query_pairs_mut()
.append_pair("slot", &format!("{}", slot.as_u64()));
client.get(&url.to_string())
.and_then(move |url| {
client.json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)])
})
.and_then(|builder| builder.send().map_err(Error::from))
.and_then(|response| response.error_for_status().map_err(Error::from))
.and_then(|mut success| success.json::<BlockResponse<E>>().map_err(Error::from))
.map(|response| (response.beacon_block, response.root))
}
/// Returns the state and state root at the given slot.
pub fn state_at_slot(
pub fn get_state_by_slot(
&self,
slot: Slot,
) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
self.get_state("slot".to_string(), format!("{}", slot.as_u64()))
}
/// Returns the state and state root at the given root.
pub fn get_state_by_root(
&self,
root: Hash256,
) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
self.get_state("root".to_string(), root_as_string(root))
}
/// Returns the state and state root at the given slot.
fn get_state(
&self,
query_key: String,
query_param: String,
) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
let client = self.0.clone();
self.url("state")
.into_future()
.and_then(move |mut url| {
url.query_pairs_mut()
.append_pair("slot", &format!("{}", slot.as_u64()));
client.get(&url.to_string())
.and_then(move |url| {
client.json_get::<StateResponse<E>>(url, vec![(query_key, query_param)])
})
.and_then(|builder| builder.send().map_err(Error::from))
.and_then(|response| response.error_for_status().map_err(Error::from))
.and_then(|mut success| success.json::<StateResponse<E>>().map_err(Error::from))
.map(|response| (response.beacon_state, response.root))
}
}
/// Provides the functions on the `/spec` endpoint of the node.
#[derive(Clone)]
pub struct Spec<E>(HttpClient<E>);
impl<E: EthSpec> Spec<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("spec/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
pub fn get_eth2_config(&self) -> impl Future<Item = Eth2Config, Error = Error> {
let client = self.0.clone();
self.url("eth2_config")
.into_future()
.and_then(move |url| client.json_get(url, vec![]))
}
}
/// Provides the functions on the `/node` endpoint of the node.
#[derive(Clone)]
pub struct Node<E>(HttpClient<E>);
impl<E: EthSpec> Node<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("node/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
pub fn get_version(&self) -> impl Future<Item = String, Error = Error> {
let client = self.0.clone();
self.url("version")
.into_future()
.and_then(move |url| client.json_get(url, vec![]))
}
}
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec")]
pub struct BlockResponse<T: EthSpec> {
@ -128,6 +464,18 @@ pub struct StateResponse<T: EthSpec> {
pub root: Hash256,
}
fn root_as_string(root: Hash256) -> String {
format!("0x{:?}", root)
}
fn signature_as_string(signature: &Signature) -> String {
format!("0x{}", hex::encode(signature.as_ssz_bytes()))
}
fn pubkey_as_string(pubkey: &PublicKey) -> String {
format!("0x{}", hex::encode(pubkey.as_ssz_bytes()))
}
impl From<reqwest::Error> for Error {
fn from(e: reqwest::Error) -> Error {
Error::ReqwestError(e)
@ -139,3 +487,9 @@ impl From<url::ParseError> for Error {
Error::UrlParseError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::SerdeJsonError(e)
}
}
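Tying the pieces of the new client together, here is a hedged sketch that builds a `RemoteBeaconNode` with a custom timeout, fetches duties in bulk (avoiding the long-query-string problem noted above) and then publishes a block, inspecting the resulting `PublishStatus`. The endpoint, epoch, pubkeys and signed block are assumptions; the returned future would be driven on a tokio runtime, much as the tests do with `env.runtime().block_on(..)`.

```rust
use std::time::Duration;

use futures::Future; // futures 0.1, matching the rest of the crate
use remote_beacon_node::{Error, PublishStatus, RemoteBeaconNode};
use types::{BeaconBlock, Epoch, MinimalEthSpec, PublicKey};

// Hypothetical helper: resolve duties for `pubkeys` at `epoch`, then publish `block`.
fn duties_then_publish(
    endpoint: String,
    epoch: Epoch,
    pubkeys: Vec<PublicKey>,
    block: BeaconBlock<MinimalEthSpec>,
) -> Result<impl Future<Item = PublishStatus, Error = Error>, String> {
    // Longer-than-default timeout, e.g. to accommodate a debug build.
    let node =
        RemoteBeaconNode::<MinimalEthSpec>::new_with_timeout(endpoint, Duration::from_secs(30))?;
    let validator = node.http.validator();
    let publisher = validator.clone();

    Ok(validator
        // The bulk (POST) variant avoids over-long query strings for many validators.
        .get_duties_bulk(epoch, &pubkeys)
        .and_then(move |duties| {
            println!("fetched {} duties", duties.len());
            publisher.publish_block(block)
        }))
}
```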

View File

@ -28,4 +28,7 @@ pub trait SlotClock: Send + Sync + Sized {
/// Returns the duration until the next slot.
fn duration_to_next_slot(&self) -> Option<Duration>;
/// Returns the duration until the first slot of the next epoch.
fn duration_to_next_epoch(&self, slots_per_epoch: u64) -> Option<Duration>;
}

View File

@ -65,6 +65,35 @@ impl SlotClock for SystemTimeSlotClock {
}
}
fn duration_to_next_epoch(&self, slots_per_epoch: u64) -> Option<Duration> {
let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?;
let genesis = self.genesis_duration;
let slot_start = |slot: Slot| -> Duration {
let slot = slot.as_u64() as u32;
genesis + slot * self.slot_duration
};
let epoch_start_slot = self
.now()
.map(|slot| slot.epoch(slots_per_epoch))
.map(|epoch| (epoch + 1).start_slot(slots_per_epoch))?;
if now >= genesis {
Some(
slot_start(epoch_start_slot)
.checked_sub(now)
.expect("The next epoch cannot start before now"),
)
} else {
Some(
genesis
.checked_sub(now)
.expect("Control flow ensures genesis is greater than or equal to now"),
)
}
}
fn slot_duration(&self) -> Duration {
self.slot_duration
}
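The arithmetic above can be sketched in isolation: find the first slot of the next epoch, convert it to an offset from genesis, and subtract the current time (falling back to the genesis case when genesis is still in the future). This standalone version works in whole seconds and deliberately does not use the real `SystemTimeSlotClock` constructor, so treat it as illustrative only.

```rust
use std::time::Duration;
use types::Slot;

// `now` and `genesis` are both offsets from the unix epoch.
fn duration_to_next_epoch(
    now: Duration,
    genesis: Duration,
    slot_duration: Duration,
    slots_per_epoch: u64,
) -> Option<Duration> {
    let slot_secs = slot_duration.as_secs();
    if slot_secs == 0 {
        return None;
    }
    if now < genesis {
        // Before genesis, the next epoch effectively starts at genesis.
        return Some(genesis - now);
    }
    let current_slot = Slot::new((now - genesis).as_secs() / slot_secs);
    let next_epoch_start = (current_slot.epoch(slots_per_epoch) + 1).start_slot(slots_per_epoch);
    // Wall-clock time at which the next epoch begins, relative to the unix epoch.
    let start = genesis + slot_duration * (next_epoch_start.as_u64() as u32);
    start.checked_sub(now)
}
```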

View File

@ -37,6 +37,11 @@ impl SlotClock for TestingSlotClock {
Some(Duration::from_secs(1))
}
/// Always returns a duration of `1 * slots_per_epoch` second.
fn duration_to_next_epoch(&self, slots_per_epoch: u64) -> Option<Duration> {
Some(Duration::from_secs(slots_per_epoch))
}
/// Always returns a slot duration of 0 seconds.
fn slot_duration(&self) -> Duration {
Duration::from_secs(0)

View File

@ -193,7 +193,7 @@ impl<E: EthSpec> Environment<E> {
}
/// Returns a `Context` where the `service_name` is added to the logger output.
pub fn service_context(&mut self, service_name: &'static str) -> RuntimeContext<E> {
pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> {
RuntimeContext {
executor: self.runtime.executor(),
log: self.log.new(o!("service" => service_name)),
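Moving `service_name` from `&'static str` to `String` lets callers build service labels at runtime; a minimal hedged sketch, where the `Environment` value is assumed to exist elsewhere:

```rust
use environment::{Environment, RuntimeContext};
use types::EthSpec;

// Hypothetical helper: label each node's logger with a dynamically built name,
// e.g. "node_0", "node_1", ... as the simulator does.
fn node_context<E: EthSpec>(env: &mut Environment<E>, index: usize) -> RuntimeContext<E> {
    env.service_context(format!("node_{}", index))
}
```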

View File

@ -29,11 +29,11 @@ fn main() {
.short("s")
.long("spec")
.value_name("TITLE")
.help("Specifies the default eth2 spec type. Only effective when creating a new datadir.")
.help("Specifies the default eth2 spec type.")
.takes_value(true)
.possible_values(&["mainnet", "minimal", "interop"])
.global(true)
.default_value("minimal")
.default_value("minimal"),
)
.arg(
Arg::with_name("logfile")
@ -51,6 +51,15 @@ fn main() {
.possible_values(&["info", "debug", "trace", "warn", "error", "crit"])
.default_value("trace"),
)
.arg(
Arg::with_name("datadir")
.long("datadir")
.short("d")
.value_name("DIR")
.global(true)
.help("Data directory for keys and databases.")
.takes_value(true),
)
.subcommand(beacon_node::cli_app())
.subcommand(validator_client::cli_app())
.subcommand(account_manager::cli_app())
@ -123,17 +132,17 @@ fn run<E: EthSpec>(
//
// Creating a command which can run both might be useful future work.
if let Some(sub_matches) = matches.subcommand_matches("Account Manager") {
if let Some(sub_matches) = matches.subcommand_matches("account_manager") {
let runtime_context = environment.core_context();
account_manager::run(sub_matches, runtime_context);
// Exit early if the account manager was run. It does not used the tokio executor, so no
// need to wait for it to shutdown.
// Exit early if the account manager was run. It does not use the tokio executor, no need
// to wait for it to shutdown.
return Ok(());
}
let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("Beacon Node") {
let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") {
let runtime_context = environment.core_context();
let beacon = environment
@ -149,11 +158,16 @@ fn run<E: EthSpec>(
None
};
let validator_client = if let Some(sub_matches) = matches.subcommand_matches("Validator Client")
let validator_client = if let Some(sub_matches) = matches.subcommand_matches("validator_client")
{
let runtime_context = environment.core_context();
let validator = ProductionValidatorClient::new_from_cli(runtime_context, sub_matches)
let mut validator = environment
.runtime()
.block_on(ProductionValidatorClient::new_from_cli(
runtime_context,
sub_matches,
))
.map_err(|e| format!("Failed to init validator client: {}", e))?;
validator

2
protos/.gitignore vendored
View File

@ -1,2 +0,0 @@
src/services.rs
src/services_grpc.rs

View File

@ -1,14 +0,0 @@
[package]
name = "protos"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
description = "Google protobuf message and service definitions used in Lighthouse APIs."
[dependencies]
futures = "0.1.29"
grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] }
protobuf = "2.8.1"
[build-dependencies]
protoc-grpcio = "1.1.0"

View File

@ -1,10 +0,0 @@
extern crate protoc_grpcio;
use std::path::Path;
fn main() {
let proto_root = Path::new("src");
println!("cargo:rerun-if-changed={}", proto_root.display());
protoc_grpcio::compile_grpc_protos(&["services.proto"], &[proto_root], &proto_root, None)
.expect("Failed to compile gRPC definitions!");
}

View File

@ -1,5 +0,0 @@
// The protobuf code-generator is not up-to-date with clippy, therefore we silence some warnings.
#[allow(renamed_and_removed_lints)]
pub mod services;
#[allow(renamed_and_removed_lints)]
pub mod services_grpc;

View File

@ -1,159 +0,0 @@
// TODO: This setup requires that the BN (beacon node) holds the block in state
// during the interval between the `GenerateProposalRequest` and the
// `SubmitProposalRequest`.
//
// This is sub-optimal as if a validator client switches BN during this process
// the block will be lost.
//
// This "stateful" method is being used presently because it's easier and
// requires less maintenance as the `BeaconBlock` definition changes.
syntax = "proto3";
package ethereum.beacon.rpc.v1;
// Service that currently identifies a beacon node
service BeaconNodeService {
rpc Info(Empty) returns (NodeInfoResponse);
}
/// Service that handles block production
service BeaconBlockService {
// Requests a block to be signed from the beacon node.
rpc ProduceBeaconBlock(ProduceBeaconBlockRequest) returns (ProduceBeaconBlockResponse);
// Responds to the node the signed block to be published.
rpc PublishBeaconBlock(PublishBeaconBlockRequest) returns (PublishBeaconBlockResponse);
}
/// Service that provides the validator client with requisite knowledge about
//its public keys
service ValidatorService {
// Gets the block proposer slot and committee slot that a validator needs to
// perform work on.
rpc GetValidatorDuties(GetDutiesRequest) returns (GetDutiesResponse);
}
/// Service that handles validator attestations
service AttestationService {
rpc ProduceAttestationData(ProduceAttestationDataRequest) returns (ProduceAttestationDataResponse);
rpc PublishAttestation(PublishAttestationRequest) returns (PublishAttestationResponse);
}
/*
* Beacon Node Service Message
*/
message NodeInfoResponse {
string version = 1;
Fork fork = 2;
uint32 network_id = 3;
uint64 genesis_time = 4;
uint64 genesis_slot = 5;
}
message Fork {
bytes previous_version = 1;
bytes current_version = 2;
uint64 epoch = 3;
}
message Empty {}
/*
* Block Production Service Messages
*/
// Validator requests an unsigned proposal.
message ProduceBeaconBlockRequest {
uint64 slot = 1;
bytes randao_reveal = 2;
}
// Beacon node returns an unsigned proposal.
message ProduceBeaconBlockResponse {
BeaconBlock block = 1;
}
// Validator submits a signed proposal.
message PublishBeaconBlockRequest {
BeaconBlock block = 1;
}
// Beacon node indicates a successfully submitted proposal.
message PublishBeaconBlockResponse {
bool success = 1;
bytes msg = 2;
}
message BeaconBlock {
bytes ssz = 1;
}
/*
* Validator Service Messages
*/
// Validator Assignment
// the public keys of the validators
message Validators {
repeated bytes public_keys = 1;
}
// Propose slot
message GetDutiesRequest {
uint64 epoch = 1;
Validators validators = 2;
}
message GetDutiesResponse {
repeated ActiveValidator active_validators = 1;
}
message ActiveValidator {
oneof duty_oneof {
bool none = 1;
ValidatorDuty duty = 2;
}
}
message ValidatorDuty {
oneof block_oneof {
bool none = 1;
uint64 block_production_slot = 2;
}
uint64 attestation_slot = 3;
uint64 attestation_shard = 4;
uint64 committee_index = 5;
uint64 committee_len = 6;
}
/*
* Attestation Service Messages
*/
message ProduceAttestationDataRequest {
uint64 slot = 1;
uint64 shard = 2;
}
message ProduceAttestationDataResponse {
AttestationData attestation_data = 1;
}
message PublishAttestationRequest {
Attestation attestation = 1;
}
message Attestation {
bytes ssz = 1;
}
message PublishAttestationResponse {
bool success = 1;
bytes msg = 2;
}
message AttestationData {
bytes ssz = 1;
}

View File

@ -0,0 +1,12 @@
[package]
name = "beacon_chain_sim"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
node_test_rig = { path = "../node_test_rig" }
types = { path = "../../eth2/types" }
validator_client = { path = "../../validator_client" }

View File

@ -0,0 +1,131 @@
use node_test_rig::{
environment::{Environment, EnvironmentBuilder, RuntimeContext},
testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, LocalValidatorClient,
ProductionClient, ValidatorConfig,
};
use std::time::{SystemTime, UNIX_EPOCH};
use types::EthSpec;
pub type BeaconNode<E> = LocalBeaconNode<ProductionClient<E>>;
fn main() {
let nodes = 4;
let validators_per_node = 64 / nodes;
match simulation(nodes, validators_per_node) {
Ok(()) => println!("Simulation exited successfully"),
Err(e) => println!("Simulation exited with error: {}", e),
}
}
fn simulation(num_nodes: usize, validators_per_node: usize) -> Result<(), String> {
if num_nodes < 1 {
return Err("Must have at least one node".into());
}
let mut env = EnvironmentBuilder::minimal()
.async_logger("debug")?
.multi_threaded_tokio_runtime()?
.build()?;
let mut base_config = testing_client_config();
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("should get system time")
.as_secs();
base_config.genesis = ClientGenesis::Interop {
genesis_time: now,
validator_count: num_nodes * validators_per_node,
};
let boot_node =
BeaconNode::production(env.service_context("boot_node".into()), base_config.clone());
let mut nodes = (1..num_nodes)
.map(|i| {
let context = env.service_context(format!("node_{}", i));
new_with_bootnode_via_enr(context, &boot_node, base_config.clone())
})
.collect::<Vec<_>>();
let _validators = nodes
.iter()
.enumerate()
.map(|(i, node)| {
let mut context = env.service_context(format!("validator_{}", i));
// Pull the spec from the beacon node's beacon chain, in case there were some changes
// to the spec after the node booted.
context.eth2_config.spec = node
.client
.beacon_chain()
.expect("should have beacon chain")
.spec
.clone();
let indices =
(i * validators_per_node..(i + 1) * validators_per_node).collect::<Vec<_>>();
new_validator_client(
&mut env,
context,
node,
ValidatorConfig::default(),
&indices,
)
})
.collect::<Vec<_>>();
nodes.insert(0, boot_node);
env.block_until_ctrl_c()?;
Ok(())
}
// TODO: this function does not result in nodes connecting to each other. This is a bug due to
// using a 0 port for discovery. Age is fixing it.
fn new_with_bootnode_via_enr<E: EthSpec>(
context: RuntimeContext<E>,
boot_node: &BeaconNode<E>,
base_config: ClientConfig,
) -> BeaconNode<E> {
let mut config = base_config;
config.network.boot_nodes.push(
boot_node
.client
.enr()
.expect("bootnode must have a network"),
);
BeaconNode::production(context, config)
}
// Note: this function will block until the validator can connect to the beacon node. It is
// recommended to ensure that the beacon node is running first.
fn new_validator_client<E: EthSpec>(
env: &mut Environment<E>,
context: RuntimeContext<E>,
beacon_node: &BeaconNode<E>,
base_config: ValidatorConfig,
keypair_indices: &[usize],
) -> LocalValidatorClient<E> {
let mut config = base_config;
let socket_addr = beacon_node
.client
.http_listen_addr()
.expect("Must have http started");
config.http_server = format!("http://{}:{}", socket_addr.ip(), socket_addr.port());
env.runtime()
.block_on(LocalValidatorClient::production_with_insecure_keypairs(
context,
config,
keypair_indices,
))
.expect("should start validator")
}

View File

@ -4,16 +4,10 @@ version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
build = "build.rs"
[build-dependencies]
reqwest = "0.9.20"
serde_json = "1.0"
[dependencies]
web3 = "0.8.0"
tokio = "0.1.17"
futures = "0.1.25"
types = { path = "../../eth2/types"}
eth2_ssz = { path = "../../eth2/utils/ssz"}
serde_json = "1.0"
deposit_contract = { path = "../../eth2/utils/deposit_contract"}

View File

@ -7,28 +7,21 @@
//! some initial issues.
mod ganache;
use deposit_contract::{eth1_tx_data, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS};
use futures::{stream, Future, IntoFuture, Stream};
use ganache::GanacheInstance;
use ssz::Encode;
use std::time::{Duration, Instant};
use tokio::{runtime::Runtime, timer::Delay};
use types::DepositData;
use types::{EthSpec, Hash256, Keypair, Signature};
use web3::contract::{Contract, Options};
use web3::transports::Http;
use web3::types::{Address, U256};
use web3::types::{Address, TransactionRequest, U256};
use web3::{Transport, Web3};
pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0;
pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0;
const CONTRACT_DEPLOY_GAS: usize = 4_000_000;
const DEPOSIT_GAS: usize = 4_000_000;
// Deposit contract
pub const ABI: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.json");
pub const BYTECODE: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.bytecode");
/// Provides a dedicated ganache-cli instance with the deposit contract already deployed.
pub struct GanacheEth1Instance {
pub ganache: GanacheInstance,
@ -138,6 +131,7 @@ impl DepositContract {
deposit_data: DepositData,
) -> impl Future<Item = (), Error = String> {
let contract = self.contract.clone();
let web3_1 = self.web3.clone();
self.web3
.eth()
@ -149,19 +143,27 @@ impl DepositContract {
.cloned()
.ok_or_else(|| "Insufficient accounts for deposit".to_string())
})
.and_then(move |from_address| {
let params = (
deposit_data.pubkey.as_ssz_bytes(),
deposit_data.withdrawal_credentials.as_ssz_bytes(),
deposit_data.signature.as_ssz_bytes(),
);
let options = Options {
.and_then(move |from| {
let tx_request = TransactionRequest {
from,
to: Some(contract.address()),
gas: Some(U256::from(DEPOSIT_GAS)),
gas_price: None,
value: Some(from_gwei(deposit_data.amount)),
..Options::default()
// Note: the reason we use this `TransactionRequest` instead of just using the
// function in `self.contract` is so that the `eth1_tx_data` function gets used
// during testing.
//
// It's important that `eth1_tx_data` stays correct and does not suffer from
// code-rot.
data: eth1_tx_data(&deposit_data).map(Into::into).ok(),
nonce: None,
condition: None,
};
contract
.call("deposit", params, from_address, options)
web3_1
.eth()
.send_transaction(tx_request)
.map_err(|e| format!("Failed to call deposit fn: {:?}", e))
})
.map(|_| ())

View File

@ -16,3 +16,4 @@ serde = "1.0"
futures = "0.1.25"
genesis = { path = "../../beacon_node/genesis" }
remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" }
validator_client = { path = "../../validator_client" }

View File

@ -1,25 +1,41 @@
use beacon_node::{
beacon_chain::BeaconChainTypes, Client, ClientConfig, ClientGenesis, ProductionBeaconNode,
ProductionClient,
};
//! Provides easy ways to run a beacon node or validator client in-process.
//!
//! Intended to be used for testing and simulation purposes. Not for production.
use beacon_node::{beacon_chain::BeaconChainTypes, Client, ProductionBeaconNode};
use environment::RuntimeContext;
use futures::Future;
use remote_beacon_node::RemoteBeaconNode;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
use tempdir::TempDir;
use types::EthSpec;
use validator_client::{KeySource, ProductionValidatorClient};
pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient};
pub use environment;
pub use validator_client::Config as ValidatorConfig;
/// Provides a beacon node that is running in the current process. Useful for testing purposes.
/// Provides a beacon node that is running in the current process on a given tokio executor (it
/// is _local_ to this process).
///
/// Intended for use in testing and simulation. Not for production.
pub struct LocalBeaconNode<T> {
pub client: T,
pub datadir: TempDir,
}
impl<E: EthSpec> LocalBeaconNode<ProductionClient<E>> {
/// Starts a new, production beacon node.
pub fn production(context: RuntimeContext<E>) -> Self {
let (client_config, datadir) = testing_client_config();
/// Starts a new, production beacon node on the tokio runtime in the given `context`.
///
/// The node created is using the same types as the node we use in production.
pub fn production(context: RuntimeContext<E>, mut client_config: ClientConfig) -> Self {
// Creates a temporary directory that will be deleted once this `TempDir` is dropped.
let datadir = TempDir::new("lighthouse_node_test_rig")
.expect("should create temp directory for client datadir");
client_config.data_dir = datadir.path().into();
client_config.network.network_dir = PathBuf::from(datadir.path()).join("network");
let client = ProductionBeaconNode::new(context, client_config)
.wait()
@ -34,34 +50,99 @@ impl<T: BeaconChainTypes> LocalBeaconNode<Client<T>> {
/// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if
/// it were external this process.
pub fn remote_node(&self) -> Result<RemoteBeaconNode<T::EthSpec>, String> {
Ok(RemoteBeaconNode::new(
self.client
.http_listen_addr()
.ok_or_else(|| "A remote beacon node must have a http server".to_string())?,
)?)
let socket_addr = self
.client
.http_listen_addr()
.ok_or_else(|| "A remote beacon node must have a http server".to_string())?;
Ok(RemoteBeaconNode::new(format!(
"http://{}:{}",
socket_addr.ip(),
socket_addr.port()
))?)
}
}
fn testing_client_config() -> (ClientConfig, TempDir) {
// Creates a temporary directory that will be deleted once this `TempDir` is dropped.
let tempdir = TempDir::new("lighthouse_node_test_rig")
.expect("should create temp directory for client datadir");
pub fn testing_client_config() -> ClientConfig {
let mut client_config = ClientConfig::default();
client_config.data_dir = tempdir.path().into();
// Setting ports to `0` means that the OS will choose some available port.
client_config.network.libp2p_port = 0;
client_config.network.discovery_port = 0;
client_config.rpc.port = 0;
client_config.rest_api.port = 0;
client_config.websocket_server.port = 0;
client_config.dummy_eth1_backend = true;
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("should get system time")
.as_secs();
client_config.genesis = ClientGenesis::Interop {
validator_count: 8,
genesis_time: 13_371_337,
genesis_time: now,
};
(client_config, tempdir)
client_config.dummy_eth1_backend = true;
client_config
}
/// Provides a validator client that is running in the current process on a given tokio executor (it
/// is _local_ to this process).
///
/// Intended for use in testing and simulation. Not for production.
pub struct LocalValidatorClient<T: EthSpec> {
pub client: ProductionValidatorClient<T>,
pub datadir: TempDir,
}
impl<E: EthSpec> LocalValidatorClient<E> {
/// Creates a validator client with insecure deterministic keypairs. The validator directories
/// are created in a temp dir then removed when the process exits.
///
/// The validator created is using the same types as the node we use in production.
pub fn production_with_insecure_keypairs(
context: RuntimeContext<E>,
mut config: ValidatorConfig,
keypair_indices: &[usize],
) -> impl Future<Item = Self, Error = String> {
// Creates a temporary directory that will be deleted once this `TempDir` is dropped.
let datadir = TempDir::new("lighthouse-beacon-node")
.expect("should create temp directory for client datadir");
config.key_source = KeySource::InsecureKeypairs(keypair_indices.to_vec());
Self::new(context, config, datadir)
}
/// Creates a validator client that attempts to read keys from the default data dir.
///
/// - The validator created uses the same types as the node we use in production.
/// - It is recommended to use `production_with_insecure_keypairs` for testing.
pub fn production(
context: RuntimeContext<E>,
config: ValidatorConfig,
) -> impl Future<Item = Self, Error = String> {
// Creates a temporary directory that will be deleted once this `TempDir` is dropped.
let datadir = TempDir::new("lighthouse-validator")
.expect("should create temp directory for client datadir");
Self::new(context, config, datadir)
}
fn new(
context: RuntimeContext<E>,
mut config: ValidatorConfig,
datadir: TempDir,
) -> impl Future<Item = Self, Error = String> {
config.data_dir = datadir.path().into();
ProductionValidatorClient::new(context, config).map(move |mut client| {
client
.start_service()
.expect("should start validator services");
Self { client, datadir }
})
}
}
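// Illustrative sketch (not part of this diff): starting a local validator client in a
// simulation, backed by the insecure deterministic interop keypairs 0..8. The function
// name and the validator count are arbitrary choices for this example.
fn spawn_test_validator_client<E: EthSpec>(
    context: RuntimeContext<E>,
) -> impl Future<Item = LocalValidatorClient<E>, Error = String> {
    let keypair_indices: Vec<usize> = (0..8).collect();
    LocalValidatorClient::production_with_insecure_keypairs(
        context,
        ValidatorConfig::default(),
        &keypair_indices,
    )
}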

View File

@ -9,15 +9,12 @@ name = "validator_client"
path = "src/lib.rs"
[dependencies]
bls = { path = "../eth2/utils/bls" }
eth2_ssz = "0.1.2"
eth2_config = { path = "../eth2/utils/eth2_config" }
tree_hash = "0.1.0"
clap = "2.33.0"
lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" }
eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" }
grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] }
protos = { path = "../protos" }
slot_clock = { path = "../eth2/utils/slot_clock" }
types = { path = "../eth2/types" }
serde = "1.0.102"
@ -37,3 +34,10 @@ environment = { path = "../lighthouse/environment" }
parking_lot = "0.7"
exit-future = "0.1.4"
libc = "0.2.65"
eth2_ssz_derive = { path = "../eth2/utils/ssz_derive" }
hex = "0.4"
deposit_contract = { path = "../eth2/utils/deposit_contract" }
bls = { path = "../eth2/utils/bls" }
remote_beacon_node = { path = "../eth2/utils/remote_beacon_node" }
tempdir = "0.3"
rayon = "1.2.0"

View File

@ -1,23 +0,0 @@
//TODO: generalise these enums to the crate
use crate::block_producer::{BeaconNodeError, PublishOutcome};
use types::{Attestation, AttestationData, CommitteeIndex, EthSpec, Slot};
/// Defines the methods required to produce and publish attestations on a Beacon Node. Abstracts the
/// actual beacon node.
pub trait BeaconNodeAttestation: Send + Sync {
/// Request that the node produces the required attestation data.
///
fn produce_attestation_data(
&self,
slot: Slot,
index: CommitteeIndex,
) -> Result<AttestationData, BeaconNodeError>;
/// Request that the node publishes an attestation.
///
/// Returns the outcome of the publish attempt.
fn publish_attestation<T: EthSpec>(
&self,
attestation: Attestation<T>,
) -> Result<PublishOutcome, BeaconNodeError>;
}

View File

@ -1,57 +0,0 @@
use super::beacon_node_attestation::BeaconNodeAttestation;
use crate::block_producer::{BeaconNodeError, PublishOutcome};
use protos::services_grpc::AttestationServiceClient;
use ssz::{Decode, Encode};
use protos::services::{
Attestation as GrpcAttestation, ProduceAttestationDataRequest, PublishAttestationRequest,
};
use types::{Attestation, AttestationData, CommitteeIndex, EthSpec, Slot};
impl BeaconNodeAttestation for AttestationServiceClient {
fn produce_attestation_data(
&self,
slot: Slot,
index: CommitteeIndex,
) -> Result<AttestationData, BeaconNodeError> {
let mut req = ProduceAttestationDataRequest::new();
req.set_slot(slot.as_u64());
req.set_shard(index);
let reply = self
.produce_attestation_data(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
let attestation_data =
AttestationData::from_ssz_bytes(reply.get_attestation_data().get_ssz())
.map_err(|_| BeaconNodeError::DecodeFailure)?;
Ok(attestation_data)
}
fn publish_attestation<T: EthSpec>(
&self,
attestation: Attestation<T>,
) -> Result<PublishOutcome, BeaconNodeError> {
let mut req = PublishAttestationRequest::new();
let ssz = attestation.as_ssz_bytes();
let mut grpc_attestation = GrpcAttestation::new();
grpc_attestation.set_ssz(ssz);
req.set_attestation(grpc_attestation);
let reply = self
.publish_attestation(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
if reply.get_success() {
Ok(PublishOutcome::Valid)
} else {
// TODO: distinguish between different errors
Ok(PublishOutcome::InvalidAttestation(
"Publish failed".to_string(),
))
}
}
}

View File

@ -1,166 +0,0 @@
mod beacon_node_attestation;
mod grpc;
use std::sync::Arc;
use types::{ChainSpec, Domain, EthSpec, Fork};
//TODO: Move these higher up in the crate
use super::block_producer::{BeaconNodeError, PublishOutcome, ValidatorEvent};
use crate::signer::Signer;
use beacon_node_attestation::BeaconNodeAttestation;
use core::marker::PhantomData;
use slog::{error, info, warn};
use tree_hash::TreeHash;
use types::{AggregateSignature, Attestation, AttestationData, AttestationDuty, BitList};
//TODO: Group these errors at a crate level
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconNodeError(BeaconNodeError),
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
/// This struct contains the logic for requesting and signing beacon attestations for a validator. The
/// validator can abstractly sign via the Signer trait object.
pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> {
/// The current fork.
pub fork: Fork,
/// The attestation duty to perform.
pub duty: AttestationDuty,
/// The chain specification.
pub spec: Arc<ChainSpec>,
/// The beacon node to connect to.
pub beacon_node: Arc<B>,
/// The signer to sign the attestation.
pub signer: &'a S,
/// Used for calculating epoch.
pub slots_per_epoch: u64,
/// Mere vessel for E.
pub _phantom: PhantomData<E>,
}
impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a, B, S, E> {
/// Handle outputs and results from attestation production.
pub fn handle_produce_attestation(&mut self, log: slog::Logger) {
match self.produce_attestation() {
Ok(ValidatorEvent::AttestationProduced(slot)) => info!(
log,
"Attestation produced";
"validator" => format!("{}", self.signer),
"slot" => slot,
),
Err(e) => error!(log, "Attestation production error"; "Error" => format!("{:?}", e)),
Ok(ValidatorEvent::SignerRejection(_slot)) => {
error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string())
}
Ok(ValidatorEvent::IndexedAttestationNotProduced(_slot)) => {
error!(log, "Attestation production error"; "Error" => "Rejected the attestation as it could have been slashed".to_string())
}
Ok(ValidatorEvent::PublishAttestationFailed) => {
error!(log, "Attestation production error"; "Error" => "Beacon node was unable to publish an attestation".to_string())
}
Ok(ValidatorEvent::InvalidAttestation) => {
error!(log, "Attestation production error"; "Error" => "The signed attestation was invalid".to_string())
}
Ok(v) => {
warn!(log, "Unknown result for attestation production"; "Error" => format!("{:?}",v))
}
}
}
/// Produce an attestation, sign it and send it back
///
/// Assumes that an attestation is required at this slot (does not check the duties).
///
/// Ensures the message is not slashable.
///
/// !!! UNSAFE !!!
///
/// The slash-protection code is not yet implemented. There is zero protection against
/// slashing.
pub fn produce_attestation(&mut self) -> Result<ValidatorEvent, Error> {
let epoch = self.duty.slot.epoch(self.slots_per_epoch);
let attestation = self
.beacon_node
.produce_attestation_data(self.duty.slot, self.duty.index)?;
if self.safe_to_produce(&attestation) {
let domain = self
.spec
.get_domain(epoch, Domain::BeaconAttester, &self.fork);
if let Some(attestation) = self.sign_attestation(attestation, self.duty, domain) {
match self.beacon_node.publish_attestation(attestation) {
Ok(PublishOutcome::InvalidAttestation(_string)) => {
Ok(ValidatorEvent::InvalidAttestation)
}
Ok(PublishOutcome::Valid) => {
Ok(ValidatorEvent::AttestationProduced(self.duty.slot))
}
Err(_) | Ok(_) => Ok(ValidatorEvent::PublishAttestationFailed),
}
} else {
Ok(ValidatorEvent::SignerRejection(self.duty.slot))
}
} else {
Ok(ValidatorEvent::IndexedAttestationNotProduced(
self.duty.slot,
))
}
}
/// Consumes an attestation, returning the attestation signed by the validator's private key.
///
/// Important: this function will not check to ensure the attestation is not slashable. This must be
/// done upstream.
fn sign_attestation(
&mut self,
attestation: AttestationData,
duties: AttestationDuty,
domain: u64,
) -> Option<Attestation<E>> {
self.store_produce(&attestation);
// build the aggregate signature
let aggregate_signature = {
let message = attestation.tree_hash_root();
let sig = self.signer.sign_message(&message, domain)?;
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&sig);
agg_sig
};
let mut aggregation_bits = BitList::with_capacity(duties.committee_len).ok()?;
aggregation_bits.set(duties.committee_position, true).ok()?;
Some(Attestation {
aggregation_bits,
data: attestation,
signature: aggregate_signature,
})
}
/// Returns `true` if signing an attestation is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _attestation: &AttestationData) -> bool {
//TODO: Implement slash protection
true
}
/// Record that an attestation was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _attestation: &AttestationData) {
// TODO: Implement slash protection
}
}

View File

@ -0,0 +1,314 @@
use crate::{duties_service::DutiesService, validator_store::ValidatorStore};
use environment::RuntimeContext;
use exit_future::Signal;
use futures::{Future, Stream};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode, ValidatorDuty};
use slog::{crit, info, trace};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, CommitteeIndex, EthSpec, Slot};
/// Builds an `AttestationService`.
pub struct AttestationServiceBuilder<T, E: EthSpec> {
duties_service: Option<DutiesService<T, E>>,
validator_store: Option<ValidatorStore<T, E>>,
slot_clock: Option<T>,
beacon_node: Option<RemoteBeaconNode<E>>,
context: Option<RuntimeContext<E>>,
}
impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> {
pub fn new() -> Self {
Self {
duties_service: None,
validator_store: None,
slot_clock: None,
beacon_node: None,
context: None,
}
}
pub fn duties_service(mut self, service: DutiesService<T, E>) -> Self {
self.duties_service = Some(service);
self
}
pub fn validator_store(mut self, store: ValidatorStore<T, E>) -> Self {
self.validator_store = Some(store);
self
}
pub fn slot_clock(mut self, slot_clock: T) -> Self {
self.slot_clock = Some(slot_clock);
self
}
pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self {
self.beacon_node = Some(beacon_node);
self
}
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
self.context = Some(context);
self
}
pub fn build(self) -> Result<AttestationService<T, E>, String> {
Ok(AttestationService {
inner: Arc::new(Inner {
duties_service: self
.duties_service
.ok_or_else(|| "Cannot build AttestationService without duties_service")?,
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build AttestationService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build AttestationService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build AttestationService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build AttestationService without runtime_context")?,
}),
})
}
}
/// Helper to minimise `Arc` usage.
pub struct Inner<T, E: EthSpec> {
duties_service: DutiesService<T, E>,
validator_store: ValidatorStore<T, E>,
slot_clock: T,
beacon_node: RemoteBeaconNode<E>,
context: RuntimeContext<E>,
}
/// Attempts to produce attestations for all known validators 1/3rd of the way through each slot.
///
/// If multiple validators are on the same committee, a single attestation is downloaded,
/// signed by each of them, then published back to the beacon node, so the published
/// attestation carries a signature from each of those validators.
pub struct AttestationService<T, E: EthSpec> {
inner: Arc<Inner<T, E>>,
}
impl<T, E: EthSpec> Clone for AttestationService<T, E> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T, E: EthSpec> Deref for AttestationService<T, E> {
type Target = Inner<T, E>;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// Starts the service which periodically produces attestations.
pub fn start_update_service(&self, spec: &ChainSpec) -> Result<Signal, String> {
let context = &self.context;
let log = context.log.clone();
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new(
Instant::now() + duration_to_next_slot + slot_duration / 3,
slot_duration,
)
};
let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone();
let log_1 = log.clone();
let log_2 = log.clone();
let log_3 = log.clone();
context.executor.spawn(
exit_fut
.until(
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| {
if let Err(e) = service.spawn_attestation_tasks() {
crit!(
log_2,
"Failed to spawn attestation tasks";
"error" => e
)
} else {
trace!(
log_2,
"Spawned attestation tasks";
)
}
Ok(())
})
// Prevent any errors from escaping and stopping the interval.
.then(|_| Ok(())),
)
.map(move |_| info!(log_3, "Shutdown complete")),
);
Ok(exit_signal)
}
/// For each required attestation, spawn a new task that downloads, signs and uploads the
/// attestation to the beacon node.
fn spawn_attestation_tasks(&self) -> Result<(), String> {
let service = self.clone();
let slot = service
.slot_clock
.now()
.ok_or_else(|| "Failed to read slot clock".to_string())?;
let mut committee_indices: HashMap<CommitteeIndex, Vec<ValidatorDuty>> = HashMap::new();
service
.duties_service
.attesters(slot)
.into_iter()
.for_each(|duty| {
if let Some(committee_index) = duty.attestation_committee_index {
let validator_duties =
committee_indices.entry(committee_index).or_insert(vec![]);
validator_duties.push(duty);
}
});
committee_indices
.into_iter()
.for_each(|(committee_index, validator_duties)| {
// Spawn a separate task for each attestation.
service.context.executor.spawn(self.clone().do_attestation(
slot,
committee_index,
validator_duties,
));
});
Ok(())
}
/// For a given `committee_index`, download the attestation, have it signed by all validators
/// in `validator_duties` then upload it.
fn do_attestation(
&self,
slot: Slot,
committee_index: CommitteeIndex,
validator_duties: Vec<ValidatorDuty>,
) -> impl Future<Item = (), Error = ()> {
let service_1 = self.clone();
let service_2 = self.clone();
let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone();
self.beacon_node
.http
.validator()
.produce_attestation(slot, committee_index)
.map_err(|e| format!("Failed to produce attestation: {:?}", e))
.map(move |attestation| {
validator_duties
.iter()
.fold(attestation, |mut attestation, duty| {
let log = service_1.context.log.clone();
if let Some((
duty_slot,
duty_committee_index,
validator_committee_position,
)) = attestation_duties(duty)
{
if duty_slot == slot && duty_committee_index == committee_index {
if service_1
.validator_store
.sign_attestation(
&duty.validator_pubkey,
validator_committee_position,
&mut attestation,
)
.is_none()
{
crit!(log, "Failed to sign attestation");
}
} else {
crit!(log, "Inconsistent validator duties during signing");
}
} else {
crit!(log, "Missing validator duties when signing");
}
attestation
})
})
.and_then(move |attestation| {
service_2
.beacon_node
.http
.validator()
.publish_attestation(attestation.clone())
.map(|publish_status| (attestation, publish_status))
.map_err(|e| format!("Failed to publish attestation: {:?}", e))
})
.map(move |(attestation, publish_status)| match publish_status {
PublishStatus::Valid => info!(
log_1,
"Successfully published attestation";
"signatures" => attestation.aggregation_bits.num_set_bits(),
"head_block" => format!("{}", attestation.data.beacon_block_root),
"committee_index" => attestation.data.index,
"slot" => attestation.data.slot.as_u64(),
),
PublishStatus::Invalid(msg) => crit!(
log_1,
"Published attestation was invalid";
"message" => msg,
"committee_index" => attestation.data.index,
"slot" => attestation.data.slot.as_u64(),
),
PublishStatus::Unknown => {
crit!(log_1, "Unknown condition when publishing attestation")
}
})
.map_err(move |e| {
crit!(
log_2,
"Error during attestation production";
"error" => e
)
})
}
}
fn attestation_duties(duty: &ValidatorDuty) -> Option<(Slot, CommitteeIndex, usize)> {
Some((
duty.attestation_slot?,
duty.attestation_committee_index?,
duty.attestation_committee_position?,
))
}
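// Sketch (not part of this diff): the timer above first fires one third of the way into
// the next slot and then once per slot thereafter. In isolation the arithmetic looks like
// this; the parameter names stand in for the values read from the spec and slot clock.
use std::time::{Duration, Instant};

fn first_attestation_tick(
    milliseconds_per_slot: u64,
    duration_to_next_slot: Duration,
) -> Instant {
    let slot_duration = Duration::from_millis(milliseconds_per_slot);
    // Next slot boundary, plus one third of a slot, mirroring `start_update_service`.
    Instant::now() + duration_to_next_slot + slot_duration / 3
}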

View File

@ -1,34 +0,0 @@
use types::{BeaconBlock, EthSpec, Signature, Slot};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError {
RemoteFailure(String),
DecodeFailure,
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishOutcome {
Valid,
InvalidBlock(String),
InvalidAttestation(String),
}
/// Defines the methods required to produce and publish blocks on a Beacon Node. Abstracts the
/// actual beacon node.
pub trait BeaconNodeBlock: Send + Sync {
/// Request that the node produces a block.
///
/// Returns Ok(None) if the Beacon Node is unable to produce at the given slot.
fn produce_beacon_block<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &Signature,
) -> Result<Option<BeaconBlock<T>>, BeaconNodeError>;
/// Request that the node publishes a block.
///
/// Returns the outcome of the publish attempt.
fn publish_beacon_block<T: EthSpec>(
&self,
block: BeaconBlock<T>,
) -> Result<PublishOutcome, BeaconNodeError>;
}

View File

@ -1,87 +0,0 @@
use super::beacon_node_block::*;
use protos::services::{
BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest,
};
use protos::services_grpc::BeaconBlockServiceClient;
use ssz::{Decode, Encode};
use std::sync::Arc;
use types::{BeaconBlock, EthSpec, Signature, Slot};
//TODO: Remove this new type. Do not need to wrap
/// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be
/// implemented upon it.
pub struct BeaconBlockGrpcClient {
client: Arc<BeaconBlockServiceClient>,
}
impl BeaconBlockGrpcClient {
pub fn new(client: Arc<BeaconBlockServiceClient>) -> Self {
Self { client }
}
}
impl BeaconNodeBlock for BeaconBlockGrpcClient {
/// Request a Beacon Node (BN) to produce a new block at the supplied slot.
///
/// Returns `None` if it is not possible to produce at the supplied slot. For example, if the
/// BN is unable to find a parent block.
fn produce_beacon_block<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &Signature,
) -> Result<Option<BeaconBlock<T>>, BeaconNodeError> {
// request a beacon block from the node
let mut req = ProduceBeaconBlockRequest::new();
req.set_slot(slot.as_u64());
req.set_randao_reveal(randao_reveal.as_ssz_bytes());
//TODO: Determine if we want an explicit timeout
let reply = self
.client
.produce_beacon_block(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
// format the reply
if reply.has_block() {
let block = reply.get_block();
let ssz = block.get_ssz();
let block =
BeaconBlock::from_ssz_bytes(&ssz).map_err(|_| BeaconNodeError::DecodeFailure)?;
Ok(Some(block))
} else {
Ok(None)
}
}
/// Request a Beacon Node (BN) to publish a block.
///
/// Generally, this will be called after a `produce_beacon_block` call with a block that has
/// been completed (signed) by the validator client.
fn publish_beacon_block<T: EthSpec>(
&self,
block: BeaconBlock<T>,
) -> Result<PublishOutcome, BeaconNodeError> {
let mut req = PublishBeaconBlockRequest::new();
let ssz = block.as_ssz_bytes();
let mut grpc_block = GrpcBeaconBlock::new();
grpc_block.set_ssz(ssz);
req.set_block(grpc_block);
let reply = self
.client
.publish_beacon_block(&req)
.map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
if reply.get_success() {
Ok(PublishOutcome::Valid)
} else {
// TODO: distinguish between different errors
Ok(PublishOutcome::InvalidBlock("Publish failed".to_string()))
}
}
}

View File

@ -1,259 +0,0 @@
mod beacon_node_block;
mod grpc;
use self::beacon_node_block::BeaconNodeBlock;
pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome};
pub use self::grpc::BeaconBlockGrpcClient;
use crate::signer::Signer;
use core::marker::PhantomData;
use slog::{error, info, trace, warn};
use std::sync::Arc;
use tree_hash::{SignedRoot, TreeHash};
use types::{BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Slot};
#[derive(Debug, PartialEq)]
pub enum Error {
BeaconNodeError(BeaconNodeError),
}
#[derive(Debug, PartialEq)]
pub enum ValidatorEvent {
/// A new block was produced.
BlockProduced(Slot),
/// A new attestation was produced.
AttestationProduced(Slot),
/// A block was not produced as it would have been slashable.
SlashableBlockNotProduced(Slot),
/// An attestation was not produced as it would have been slashable.
IndexedAttestationNotProduced(Slot),
/// The Beacon Node was unable to produce a block at that slot.
BeaconNodeUnableToProduceBlock(Slot),
/// The signer failed to sign the message.
SignerRejection(Slot),
/// Publishing an attestation failed.
PublishAttestationFailed,
/// Beacon node rejected the attestation.
InvalidAttestation,
}
/// This struct contains the logic for requesting and signing beacon blocks for a validator. The
/// validator can abstractly sign via the Signer trait object.
pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> {
/// The current fork.
pub fork: Fork,
/// The current slot to produce a block for.
pub slot: Slot,
/// The chain specification.
pub spec: Arc<ChainSpec>,
/// The beacon node to connect to.
pub beacon_node: Arc<B>,
/// The signer to sign the block.
pub signer: &'a S,
/// Used for calculating epoch.
pub slots_per_epoch: u64,
/// Mere vessel for E.
pub _phantom: PhantomData<E>,
/// The logger, for logging
pub log: slog::Logger,
}
impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> {
/// Handle outputs and results from block production.
pub fn handle_produce_block(&mut self) {
match self.produce_block() {
Ok(ValidatorEvent::BlockProduced(slot)) => info!(
self.log,
"Block produced";
"validator" => format!("{}", self.signer),
"slot" => slot,
),
Err(e) => error!(self.log, "Block production error"; "Error" => format!("{:?}", e)),
Ok(ValidatorEvent::SignerRejection(_slot)) => {
error!(self.log, "Block production error"; "Error" => "Signer Could not sign the block".to_string())
}
Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => {
error!(self.log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string())
}
Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => {
error!(self.log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string())
}
Ok(v) => {
warn!(self.log, "Unknown result for block production"; "Error" => format!("{:?}",v))
}
}
}
/// Produce a block at some slot.
///
/// Assumes that a block is required at this slot (does not check the duties).
///
/// Ensures the message is not slashable.
///
/// !!! UNSAFE !!!
///
/// The slash-protection code is not yet implemented. There is zero protection against
/// slashing.
pub fn produce_block(&mut self) -> Result<ValidatorEvent, Error> {
let epoch = self.slot.epoch(self.slots_per_epoch);
trace!(self.log, "Producing block"; "epoch" => epoch);
let message = epoch.tree_hash_root();
let randao_reveal = match self.signer.sign_message(
&message,
self.spec.get_domain(epoch, Domain::Randao, &self.fork),
) {
None => {
warn!(self.log, "Signing rejected"; "message" => format!("{:?}", message));
return Ok(ValidatorEvent::SignerRejection(self.slot));
}
Some(signature) => signature,
};
if let Some(block) = self
.beacon_node
.produce_beacon_block(self.slot, &randao_reveal)?
{
if self.safe_to_produce(&block) {
let slot = block.slot;
let domain = self
.spec
.get_domain(epoch, Domain::BeaconProposer, &self.fork);
if let Some(block) = self.sign_block(block, domain) {
self.beacon_node.publish_beacon_block(block)?;
Ok(ValidatorEvent::BlockProduced(slot))
} else {
Ok(ValidatorEvent::SignerRejection(self.slot))
}
} else {
Ok(ValidatorEvent::SlashableBlockNotProduced(self.slot))
}
} else {
Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(self.slot))
}
}
/// Consumes a block, returning that block signed by the validator's private key.
///
/// Important: this function will not check to ensure the block is not slashable. This must be
/// done upstream.
fn sign_block(&mut self, mut block: BeaconBlock<E>, domain: u64) -> Option<BeaconBlock<E>> {
self.store_produce(&block);
match self.signer.sign_message(&block.signed_root()[..], domain) {
None => None,
Some(signature) => {
block.signature = signature;
Some(block)
}
}
}
/// Returns `true` if signing a block is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _block: &BeaconBlock<E>) -> bool {
// TODO: ensure the producer doesn't produce slashable blocks.
// https://github.com/sigp/lighthouse/issues/160
true
}
/// Record that a block was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _block: &BeaconBlock<E>) {
// TODO: record this block production to prevent future slashings.
// https://github.com/sigp/lighthouse/issues/160
}
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
/* Old tests - Re-work for new logic
#[cfg(test)]
mod tests {
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
use super::*;
use slot_clock::TestingSlotClock;
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
Keypair,
};
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160
//
// These tests should serve as a good example for future tests.
#[test]
pub fn polling() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let spec = Arc::new(ChainSpec::mainnet());
let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(SimulatedBeaconNode::default());
let signer = Arc::new(LocalSigner::new(Keypair::random()));
let mut epoch_map = EpochMap::new(T::slots_per_epoch());
let produce_slot = Slot::new(100);
let produce_epoch = produce_slot.epoch(T::slots_per_epoch());
epoch_map.map.insert(produce_epoch, produce_slot);
let epoch_map = Arc::new(epoch_map);
let mut block_proposer = BlockProducer::new(
spec.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
// Configure responses from the BeaconNode.
beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng))));
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock));
// One slot before production slot...
slot_clock.set_slot(produce_slot.as_u64() - 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1))
);
// On the produce slot...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProduced(produce_slot.into()))
);
// Trying the same produce slot again...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::SlotAlreadyProcessed(produce_slot))
);
// One slot after the produce slot...
slot_clock.set_slot(produce_slot.as_u64() + 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1))
);
// In an epoch without known duties...
let slot = (produce_epoch.as_u64() + 1) * T::slots_per_epoch();
slot_clock.set_slot(slot);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot)))
);
}
}
*/

View File

@ -0,0 +1,268 @@
use crate::{duties_service::DutiesService, validator_store::ValidatorStore};
use environment::RuntimeContext;
use exit_future::Signal;
use futures::{stream, Future, IntoFuture, Stream};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use slog::{crit, error, info, trace};
use slot_clock::SlotClock;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, EthSpec};
/// Delay this period of time after the slot starts. This allows the node to process the new slot.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100);
/// Builds a `BlockService`.
pub struct BlockServiceBuilder<T, E: EthSpec> {
duties_service: Option<DutiesService<T, E>>,
validator_store: Option<ValidatorStore<T, E>>,
slot_clock: Option<Arc<T>>,
beacon_node: Option<RemoteBeaconNode<E>>,
context: Option<RuntimeContext<E>>,
}
impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> {
pub fn new() -> Self {
Self {
duties_service: None,
validator_store: None,
slot_clock: None,
beacon_node: None,
context: None,
}
}
pub fn duties_service(mut self, service: DutiesService<T, E>) -> Self {
self.duties_service = Some(service);
self
}
pub fn validator_store(mut self, store: ValidatorStore<T, E>) -> Self {
self.validator_store = Some(store);
self
}
pub fn slot_clock(mut self, slot_clock: T) -> Self {
self.slot_clock = Some(Arc::new(slot_clock));
self
}
pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self {
self.beacon_node = Some(beacon_node);
self
}
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
self.context = Some(context);
self
}
pub fn build(self) -> Result<BlockService<T, E>, String> {
Ok(BlockService {
inner: Arc::new(Inner {
duties_service: self
.duties_service
.ok_or_else(|| "Cannot build BlockService without duties_service")?,
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build BlockService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build BlockService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build BlockService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build BlockService without runtime_context")?,
}),
})
}
}
/// Helper to minimise `Arc` usage.
pub struct Inner<T, E: EthSpec> {
duties_service: DutiesService<T, E>,
validator_store: ValidatorStore<T, E>,
slot_clock: Arc<T>,
beacon_node: RemoteBeaconNode<E>,
context: RuntimeContext<E>,
}
/// Attempts to produce a block for any local block producer(s) shortly after each slot starts.
pub struct BlockService<T, E: EthSpec> {
inner: Arc<Inner<T, E>>,
}
impl<T, E: EthSpec> Clone for BlockService<T, E> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T, E: EthSpec> Deref for BlockService<T, E> {
type Target = Inner<T, E>;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
/// Starts the service that periodically attempts to produce blocks.
pub fn start_update_service(&self, spec: &ChainSpec) -> Result<Signal, String> {
let log = self.context.log.clone();
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new(
Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT,
slot_duration,
)
};
let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone();
let log_1 = log.clone();
let log_2 = log.clone();
self.context.executor.spawn(
exit_fut
.until(
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| service.clone().do_update())
// Prevent any errors from escaping and stopping the interval.
.then(|_| Ok(())),
)
.map(move |_| info!(log_2, "Shutdown complete")),
);
Ok(exit_signal)
}
/// Attempt to produce a block for any block producers in the `ValidatorStore`.
fn do_update(self) -> impl Future<Item = (), Error = ()> {
let service = self.clone();
let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone();
self.slot_clock
.now()
.ok_or_else(move || {
crit!(log_1, "Duties manager failed to read slot clock");
})
.into_future()
.and_then(move |slot| {
let iter = service.duties_service.block_producers(slot).into_iter();
if iter.len() == 0 {
trace!(
log_2,
"No local block proposers for this slot";
"slot" => slot.as_u64()
)
} else if iter.len() > 1 {
error!(
log_2,
"Multiple block proposers for this slot";
"action" => "producing blocks for all proposers",
"num_proposers" => iter.len(),
"slot" => slot.as_u64(),
)
}
stream::unfold(iter, move |mut block_producers| {
let log_1 = service.context.log.clone();
let log_2 = service.context.log.clone();
let service_1 = service.clone();
let service_2 = service.clone();
let service_3 = service.clone();
block_producers.next().map(move |validator_pubkey| {
service_1
.validator_store
.randao_reveal(&validator_pubkey, slot.epoch(E::slots_per_epoch()))
.ok_or_else(|| "Unable to produce randao reveal".to_string())
.into_future()
.and_then(move |randao_reveal| {
service_1
.beacon_node
.http
.validator()
.produce_block(slot, randao_reveal)
.map_err(|e| {
format!(
"Error from beacon node when producing block: {:?}",
e
)
})
})
.and_then(move |block| {
service_2
.validator_store
.sign_block(&validator_pubkey, block)
.ok_or_else(|| "Unable to sign block".to_string())
})
.and_then(move |block| {
service_3
.beacon_node
.http
.validator()
.publish_block(block.clone())
.map(|publish_status| (block, publish_status))
.map_err(|e| {
format!(
"Error from beacon node when publishing block: {:?}",
e
)
})
})
.map(move |(block, publish_status)| match publish_status {
PublishStatus::Valid => info!(
log_1,
"Successfully published block";
"deposits" => block.body.deposits.len(),
"attestations" => block.body.attestations.len(),
"slot" => block.slot.as_u64(),
),
PublishStatus::Invalid(msg) => crit!(
log_1,
"Published block was invalid";
"message" => msg,
"slot" => block.slot.as_u64(),
),
PublishStatus::Unknown => {
crit!(log_1, "Unknown condition when publishing block")
}
})
.map_err(move |e| {
crit!(
log_2,
"Error whilst producing block";
"message" => e
)
})
.then(|_| Ok(((), block_producers)))
})
})
.collect()
.map(|_| ())
})
}
}
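// Sketch (not part of this diff): wiring a `BlockService` with the builder above and
// starting its per-slot timer. All arguments are assumed to be constructed elsewhere by
// the caller; the function name is a placeholder.
fn start_block_service<T: SlotClock + 'static, E: EthSpec>(
    duties_service: DutiesService<T, E>,
    validator_store: ValidatorStore<T, E>,
    slot_clock: T,
    beacon_node: RemoteBeaconNode<E>,
    context: RuntimeContext<E>,
    spec: &ChainSpec,
) -> Result<Signal, String> {
    let service = BlockServiceBuilder::new()
        .duties_service(duties_service)
        .validator_store(validator_store)
        .slot_clock(slot_clock)
        .beacon_node(beacon_node)
        .runtime_context(context)
        .build()?;
    // Dropping the returned `Signal` stops the block production timer.
    service.start_update_service(spec)
}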

View File

@ -1,44 +1,16 @@
use crate::config::{DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, DEFAULT_SERVER_HTTP_PORT};
use crate::config::DEFAULT_HTTP_SERVER;
use clap::{App, Arg, SubCommand};
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
App::new("Validator Client")
.visible_aliases(&["v", "vc", "validator", "validator_client"])
.version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>")
.about("Eth 2.0 Validator Client")
.arg(
Arg::with_name("datadir")
.long("datadir")
.short("d")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true),
)
App::new("validator_client")
.visible_aliases(&["v", "vc", "validator"])
.about("Ethereum 2.0 Validator Client")
.arg(
Arg::with_name("server")
.long("server")
.value_name("NETWORK_ADDRESS")
.help("Address to connect to BeaconNode.")
.default_value(DEFAULT_SERVER)
.takes_value(true),
)
.arg(
Arg::with_name("server-grpc-port")
.long("server-grpc-port")
.short("g")
.value_name("PORT")
.help("Port to use for gRPC API connection to the server.")
.default_value(DEFAULT_SERVER_GRPC_PORT)
.takes_value(true),
)
.arg(
Arg::with_name("server-http-port")
.long("server-http-port")
.short("h")
.value_name("PORT")
.help("Port to use for HTTP API connection to the server.")
.default_value(DEFAULT_SERVER_HTTP_PORT)
.default_value(&DEFAULT_HTTP_SERVER)
.takes_value(true),
)
/*
@ -49,12 +21,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.subcommand(SubCommand::with_name("testnet")
.about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \
validator index. ONLY USE FOR TESTING PURPOSES!")
.arg(
Arg::with_name("bootstrap")
.short("b")
.long("bootstrap")
.help("Connect to the RPC server to download the eth2_config via the HTTP API.")
)
.subcommand(SubCommand::with_name("insecure")
.about("Uses the standard, predicatable `interop` keygen method to produce a range \
of predicatable private keys and starts performing their validator duties.")
@ -62,10 +28,10 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.value_name("VALIDATOR_INDEX")
.required(true)
.help("The first validator public key to be generated for this client."))
.arg(Arg::with_name("validator_count")
.value_name("COUNT")
.arg(Arg::with_name("last_validator")
.value_name("VALIDATOR_INDEX")
.required(true)
.help("The number of validators."))
.help("The end of the range of keys to generate. This index is not generated."))
)
.subcommand(SubCommand::with_name("interop-yaml")
.about("Loads plain-text secret keys from YAML files. Expects the interop format defined

View File

@ -1,29 +1,16 @@
use bincode;
use bls::Keypair;
use clap::ArgMatches;
use serde_derive::{Deserialize, Serialize};
use slog::{error, warn};
use std::fs::{self, File};
use std::io::{Error, ErrorKind};
use std::ops::Range;
use std::path::PathBuf;
use types::{
test_utils::{generate_deterministic_keypair, load_keypairs_from_yaml},
EthSpec, MainnetEthSpec,
};
pub const DEFAULT_SERVER: &str = "localhost";
pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051";
pub const DEFAULT_SERVER_HTTP_PORT: &str = "5052";
pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/";
/// Specifies a method for obtaining validator keypairs.
#[derive(Clone)]
pub enum KeySource {
/// Load the keypairs from disk.
Disk,
/// Generate the keypairs (insecure, generates predictable keys).
TestingKeypairRange(Range<usize>),
/// Load testing keypairs from YAML
YamlKeypairs(PathBuf),
InsecureKeypairs(Vec<usize>),
}
impl Default for KeySource {
@ -37,205 +24,78 @@ impl Default for KeySource {
pub struct Config {
/// The data directory, which stores all validator databases
pub data_dir: PathBuf,
/// The source for loading keypairs
/// Specifies how the validator client should load keypairs.
#[serde(skip)]
pub key_source: KeySource,
/// The path where the logs will be outputted
pub log_file: PathBuf,
/// The server at which the Beacon Node can be contacted
pub server: String,
/// The gRPC port on the server
pub server_grpc_port: u16,
/// The HTTP port on the server, for the REST API.
pub server_http_port: u16,
/// The number of slots per epoch.
pub slots_per_epoch: u64,
/// The http endpoint of the beacon node API.
///
/// Should be similar to `http://localhost:8080`
pub http_server: String,
}
const DEFAULT_PRIVATE_KEY_FILENAME: &str = "private.key";
impl Default for Config {
/// Build a new configuration from defaults.
fn default() -> Self {
Self {
data_dir: PathBuf::from(".lighthouse-validator"),
data_dir: PathBuf::from(".lighthouse/validators"),
key_source: <_>::default(),
log_file: PathBuf::from(""),
server: DEFAULT_SERVER.into(),
server_grpc_port: DEFAULT_SERVER_GRPC_PORT
.parse::<u16>()
.expect("gRPC port constant should be valid"),
server_http_port: DEFAULT_SERVER_GRPC_PORT
.parse::<u16>()
.expect("HTTP port constant should be valid"),
slots_per_epoch: MainnetEthSpec::slots_per_epoch(),
http_server: DEFAULT_HTTP_SERVER.to_string(),
}
}
}
impl Config {
/// Returns the full path for the client data directory (not just the name of the directory).
pub fn full_data_dir(&self) -> Option<PathBuf> {
dirs::home_dir().map(|path| path.join(&self.data_dir))
}
/// Returns a `Default` implementation of `Self` with some parameters modified by the supplied
/// `cli_args`.
pub fn from_cli(cli_args: &ArgMatches) -> Result<Config, String> {
let mut config = Config::default();
/// Creates the data directory (and any non-existing parent directories).
pub fn create_data_dir(&self) -> Option<PathBuf> {
let path = dirs::home_dir()?.join(&self.data_dir);
fs::create_dir_all(&path).ok()?;
Some(path)
}
/// Apply the following arguments to `self`, replacing values if they are specified in `args`.
///
/// Returns an error if arguments are obviously invalid. May succeed even if some values are
/// invalid.
pub fn apply_cli_args(
&mut self,
args: &ArgMatches,
_log: &slog::Logger,
) -> Result<(), &'static str> {
if let Some(datadir) = args.value_of("datadir") {
self.data_dir = PathBuf::from(datadir);
};
if let Some(srv) = args.value_of("server") {
self.server = srv.to_string();
};
Ok(())
}
/// Reads a single keypair from the given `path`.
///
/// `path` should be the path to a directory containing a private key. The file name of `path`
/// must align with the public key loaded from it, otherwise an error is returned.
///
/// An error will be returned if `path` is a file (not a directory).
fn read_keypair_file(&self, path: PathBuf) -> Result<Keypair, String> {
if !path.is_dir() {
return Err("Is not a directory".into());
if let Some(server) = cli_args.value_of("server") {
config.http_server = server.to_string();
}
let key_filename: PathBuf = path.join(DEFAULT_PRIVATE_KEY_FILENAME);
if !key_filename.is_file() {
return Err(format!(
"Private key is not a file: {:?}",
key_filename.to_str()
));
}
let mut key_file = File::open(key_filename.clone())
.map_err(|e| format!("Unable to open private key file: {}", e))?;
let key: Keypair = bincode::deserialize_from(&mut key_file)
.map_err(|e| format!("Unable to deserialize private key: {:?}", e))?;
let ki = key.identifier();
if ki
!= path
.file_name()
.ok_or_else(|| "Invalid path".to_string())?
.to_string_lossy()
{
Err(format!(
"The validator key ({:?}) did not match the directory filename {:?}.",
ki,
path.to_str()
))
} else {
Ok(key)
}
}
pub fn fetch_keys_from_disk(&self, log: &slog::Logger) -> Result<Vec<Keypair>, String> {
Ok(
fs::read_dir(&self.full_data_dir().expect("Data dir must exist"))
.map_err(|e| format!("Failed to read datadir: {:?}", e))?
.filter_map(|validator_dir| {
let path = validator_dir.ok()?.path();
if path.is_dir() {
match self.read_keypair_file(path.clone()) {
Ok(keypair) => Some(keypair),
Err(e) => {
error!(
log,
"Failed to parse a validator keypair";
"error" => e,
"path" => path.to_str(),
);
None
}
}
} else {
None
}
})
.collect(),
)
}
pub fn fetch_testing_keypairs(
&self,
range: std::ops::Range<usize>,
) -> Result<Vec<Keypair>, String> {
Ok(range.map(generate_deterministic_keypair).collect())
}
/// Loads the keypairs according to `self.key_source`. Will return one or more keypairs, or an
/// error.
#[allow(dead_code)]
pub fn fetch_keys(&self, log: &slog::Logger) -> Result<Vec<Keypair>, String> {
let keypairs = match &self.key_source {
KeySource::Disk => self.fetch_keys_from_disk(log)?,
KeySource::TestingKeypairRange(range) => {
warn!(
log,
"Using insecure interop private keys";
"range" => format!("{:?}", range)
);
self.fetch_testing_keypairs(range.clone())?
let config = match cli_args.subcommand() {
("testnet", Some(sub_cli_args)) => {
if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") {
return Err(
"Cannot specify --eth2-config and --bootstrap as it may result \
in ambiguity."
.into(),
);
}
process_testnet_subcommand(sub_cli_args, config)
}
KeySource::YamlKeypairs(path) => {
warn!(
log,
"Private keys are stored insecurely (plain text). Testing use only."
);
_ => return Err("You must use the testnet command. See '--help'.".into()),
}?;
load_keypairs_from_yaml(path.to_path_buf())?
}
};
// Check if it's an empty vector, and return none.
if keypairs.is_empty() {
Err(
"No validator keypairs were found, unable to proceed. To generate \
testing keypairs, see 'testnet range --help'."
.into(),
)
} else {
Ok(keypairs)
}
}
/// Saves a keypair to a file inside the appropriate validator directory. Returns the saved path filename.
#[allow(dead_code)]
pub fn save_key(&self, key: &Keypair) -> Result<PathBuf, Error> {
use std::os::unix::fs::PermissionsExt;
let validator_config_path = self.data_dir.join(key.identifier());
let key_path = validator_config_path.join(DEFAULT_PRIVATE_KEY_FILENAME);
fs::create_dir_all(&validator_config_path)?;
let mut key_file = File::create(&key_path)?;
let mut perm = key_file.metadata()?.permissions();
perm.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32);
key_file.set_permissions(perm)?;
bincode::serialize_into(&mut key_file, &key)
.map_err(|e| Error::new(ErrorKind::InvalidData, e))?;
Ok(key_path)
Ok(config)
}
}
/// Parses the `testnet` CLI subcommand, modifying the `config` based upon the parameters in
/// `cli_args`.
fn process_testnet_subcommand(cli_args: &ArgMatches, mut config: Config) -> Result<Config, String> {
config.key_source = match cli_args.subcommand() {
("insecure", Some(sub_cli_args)) => {
let first = sub_cli_args
.value_of("first_validator")
.ok_or_else(|| "No first validator supplied")?
.parse::<usize>()
.map_err(|e| format!("Unable to parse first validator: {:?}", e))?;
let last = sub_cli_args
.value_of("last_validator")
.ok_or_else(|| "No last validator supplied")?
.parse::<usize>()
.map_err(|e| format!("Unable to parse last validator: {:?}", e))?;
if last < first {
return Err("Cannot supply a last validator less than the first".to_string());
}
KeySource::InsecureKeypairs((first..last).collect())
}
_ => KeySource::Disk,
};
Ok(config)
}
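// Sketch (not part of this diff): the `testnet insecure <first> <last>` arguments above
// resolve to a half-open range of insecure deterministic keypair indices; `last` itself
// is not generated, so `first = 0, last = 8` yields validators 0 through 7.
fn insecure_indices(first: usize, last: usize) -> Result<KeySource, String> {
    if last < first {
        return Err("Cannot supply a last validator less than the first".to_string());
    }
    // Mirrors the range handling in `process_testnet_subcommand` above.
    Ok(KeySource::InsecureKeypairs((first..last).collect()))
}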

View File

@ -1,20 +0,0 @@
use super::EpochDuties;
use types::{Epoch, PublicKey};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeDutiesError {
RemoteFailure(String),
}
/// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
pub trait BeaconNodeDuties: Send + Sync {
/// Gets the duties for all validators.
///
/// Returns a vector of EpochDuties for each validator public key. The entry will be None for
/// validators that are not activated.
fn request_duties(
&self,
epoch: Epoch,
pub_keys: &[PublicKey],
) -> Result<EpochDuties, BeaconNodeDutiesError>;
}

View File

@ -1,133 +0,0 @@
use std::collections::HashMap;
use std::fmt;
use std::ops::{Deref, DerefMut};
use types::{AttestationDuty, Epoch, PublicKey, Slot};
/// When work needs to be performed by a validator, this type is given back to the main service;
/// it carries all the information required to process the work.
///
/// Note: This is calculated per slot, so a validator knows which slot is related to this struct.
#[derive(Debug, Clone)]
pub struct WorkInfo {
/// Validator needs to produce a block.
pub produce_block: bool,
/// Validator needs to produce an attestation. This supplies the required attestation data.
pub attestation_duty: Option<AttestationDuty>,
}
/// The information required for a validator to propose and attest during some epoch.
///
/// Generally obtained from a Beacon Node, this information contains the validator's canonical index
/// (their sequence in the global validator induction process) and the "shuffling" for that index
/// for some epoch.
#[derive(Debug, PartialEq, Clone, Copy, Default)]
pub struct EpochDuty {
pub block_production_slot: Option<Slot>,
pub attestation_duty: AttestationDuty,
}
impl EpochDuty {
/// Returns `WorkInfo` if work needs to be done in the supplied `slot`
pub fn is_work_slot(&self, slot: Slot) -> Option<WorkInfo> {
// `true` if the validator is required to produce a block at this slot
let produce_block = match self.block_production_slot {
Some(s) if s == slot => true,
_ => false,
};
// if the validator is required to attest at this slot, include the attestation duty
let mut attestation_duty = None;
if self.attestation_duty.slot == slot {
attestation_duty = Some(self.attestation_duty)
}
if produce_block | attestation_duty.is_some() {
return Some(WorkInfo {
produce_block,
attestation_duty,
});
}
None
}
}
impl fmt::Display for EpochDuty {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut display_block = String::from("None");
if let Some(block_slot) = self.block_production_slot {
display_block = block_slot.to_string();
}
write!(
f,
"produce block slot: {}, attestation slot: {}, attestation index: {}",
display_block, self.attestation_duty.slot, self.attestation_duty.index
)
}
}
/// Maps each validator public key to an optional `EpochDuty`.
pub type EpochDuties = HashMap<PublicKey, Option<EpochDuty>>;
pub enum EpochDutiesMapError {
UnknownEpoch,
UnknownValidator,
}
/// Maps an `epoch` to the `EpochDuties` for all known validators.
pub struct EpochDutiesMap {
pub slots_per_epoch: u64,
pub map: HashMap<Epoch, EpochDuties>,
}
impl EpochDutiesMap {
pub fn new(slots_per_epoch: u64) -> Self {
Self {
slots_per_epoch,
map: HashMap::new(),
}
}
}
// Expose the hashmap methods
impl Deref for EpochDutiesMap {
type Target = HashMap<Epoch, EpochDuties>;
fn deref(&self) -> &Self::Target {
&self.map
}
}
impl DerefMut for EpochDutiesMap {
fn deref_mut(&mut self) -> &mut HashMap<Epoch, EpochDuties> {
&mut self.map
}
}
impl EpochDutiesMap {
/// Checks if the validator has work to do.
pub fn is_work_slot(
&self,
slot: Slot,
signer: &PublicKey,
) -> Result<Option<WorkInfo>, EpochDutiesMapError> {
let epoch = slot.epoch(self.slots_per_epoch);
let epoch_duties = self
.map
.get(&epoch)
.ok_or_else(|| EpochDutiesMapError::UnknownEpoch)?;
if let Some(epoch_duty) = epoch_duties.get(signer) {
if let Some(duty) = epoch_duty {
// Retrieves the duty for a validator at a given slot
Ok(duty.is_work_slot(slot))
} else {
// the validator isn't active
Ok(None)
}
} else {
// validator isn't known
Err(EpochDutiesMapError::UnknownValidator)
}
}
}
// TODO: add tests.
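// Sketch (not part of this diff): the "TODO: add tests" above was never filled in before
// this file was removed. A minimal unit test for `EpochDuty::is_work_slot` would look
// roughly like this (relying on the `Default` impl implied by the derive above).
#[cfg(test)]
mod is_work_slot_sketch {
    use super::*;
    use types::{AttestationDuty, Slot};

    #[test]
    fn block_production_slot_reports_work() {
        let duty = EpochDuty {
            block_production_slot: Some(Slot::new(5)),
            // The default attestation duty targets slot 0, so it is inert at slot 5.
            attestation_duty: AttestationDuty::default(),
        };
        let work = duty.is_work_slot(Slot::new(5)).expect("slot 5 should have work");
        assert!(work.produce_block);
        assert!(work.attestation_duty.is_none());
        assert!(duty.is_work_slot(Slot::new(6)).is_none());
    }
}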

View File

@ -1,67 +0,0 @@
use super::beacon_node_duties::{BeaconNodeDuties, BeaconNodeDutiesError};
use super::epoch_duties::{EpochDuties, EpochDuty};
// to use if we manually specify a timeout
//use grpcio::CallOption;
use protos::services::{GetDutiesRequest, Validators};
use protos::services_grpc::ValidatorServiceClient;
use ssz::ssz_encode;
use std::collections::HashMap;
// use std::time::Duration;
use types::{AttestationDuty, Epoch, PublicKey, Slot};
impl BeaconNodeDuties for ValidatorServiceClient {
/// Requests all duties (block signing and committee attesting) from the Beacon Node (BN).
fn request_duties(
&self,
epoch: Epoch,
pub_keys: &[PublicKey],
) -> Result<EpochDuties, BeaconNodeDutiesError> {
// Get the required duties from all validators
// build the request
let mut req = GetDutiesRequest::new();
req.set_epoch(epoch.as_u64());
let mut validators = Validators::new();
validators.set_public_keys(pub_keys.iter().map(|v| ssz_encode(v)).collect());
req.set_validators(validators);
// set a timeout for requests
// let call_opt = CallOption::default().timeout(Duration::from_secs(2));
// send the request, get the duties reply
let reply = self
.get_validator_duties(&req)
.map_err(|err| BeaconNodeDutiesError::RemoteFailure(format!("{:?}", err)))?;
let mut epoch_duties: HashMap<PublicKey, Option<EpochDuty>> = HashMap::new();
for (index, validator_duty) in reply.get_active_validators().iter().enumerate() {
if !validator_duty.has_duty() {
// validator is inactive
epoch_duties.insert(pub_keys[index].clone(), None);
continue;
}
// active validator
let active_duty = validator_duty.get_duty();
let block_production_slot = {
if active_duty.has_block_production_slot() {
Some(Slot::from(active_duty.get_block_production_slot()))
} else {
None
}
};
let attestation_duty = AttestationDuty {
slot: Slot::from(active_duty.get_attestation_slot()),
index: active_duty.get_attestation_shard(),
committee_position: active_duty.get_committee_index() as usize,
committee_len: active_duty.get_committee_len() as usize,
};
let epoch_duty = EpochDuty {
block_production_slot,
attestation_duty,
};
epoch_duties.insert(pub_keys[index].clone(), Some(epoch_duty));
}
Ok(epoch_duties)
}
}

View File

@ -1,214 +0,0 @@
mod beacon_node_duties;
mod epoch_duties;
mod grpc;
// TODO: reintroduce tests
//#[cfg(test)]
//mod test_node;
pub use self::beacon_node_duties::{BeaconNodeDuties, BeaconNodeDutiesError};
use self::epoch_duties::{EpochDuties, EpochDutiesMapError};
pub use self::epoch_duties::{EpochDutiesMap, WorkInfo};
use super::signer::Signer;
use futures::Async;
use parking_lot::RwLock;
use slog::{debug, error, info};
use std::fmt::Display;
use std::sync::Arc;
use types::{Epoch, PublicKey, Slot};
#[derive(Debug, PartialEq, Clone)]
pub enum UpdateOutcome {
/// The `EpochDuties` were not updated during this poll.
NoChange(Epoch),
/// The `EpochDuties` for the `epoch` were previously unknown, but obtained in the poll.
NewDuties(Epoch, EpochDuties),
/// New `EpochDuties` were obtained, different to those which were previously known. This is
/// likely to be the result of chain re-organisation.
DutiesChanged(Epoch, EpochDuties),
}
#[derive(Debug, PartialEq)]
pub enum Error {
DutiesMapPoisoned,
BeaconNodeDutiesError(BeaconNodeDutiesError),
UnknownEpoch,
UnknownValidator,
}
/// A polling state machine which ensures the latest `EpochDuties` are obtained from the Beacon
/// Node.
///
/// This keeps track of all validator keys and required voting slots.
pub struct DutiesManager<U: BeaconNodeDuties, S: Signer> {
pub duties_map: RwLock<EpochDutiesMap>,
/// A list of all signer objects known to the validator service.
pub signers: Arc<Vec<S>>,
pub beacon_node: Arc<U>,
}
impl<U: BeaconNodeDuties, S: Signer + Display> DutiesManager<U, S> {
/// Check the Beacon Node for `EpochDuties`.
///
/// be a wall-clock (e.g., system time, remote server time, etc.).
fn update(&self, epoch: Epoch) -> Result<UpdateOutcome, Error> {
let public_keys: Vec<PublicKey> = self.signers.iter().map(Signer::to_public).collect();
let duties = self.beacon_node.request_duties(epoch, &public_keys)?;
{
// If these duties were known, check to see if they're updates or identical.
if let Some(known_duties) = self.duties_map.read().get(&epoch) {
if *known_duties == duties {
return Ok(UpdateOutcome::NoChange(epoch));
}
}
}
if !self.duties_map.read().contains_key(&epoch) {
//TODO: Remove clone by removing duties from outcome
self.duties_map.write().insert(epoch, duties.clone());
return Ok(UpdateOutcome::NewDuties(epoch, duties));
}
// duties have changed
//TODO: Duties could be large here. Remove from display and avoid the clone.
self.duties_map.write().insert(epoch, duties.clone());
Ok(UpdateOutcome::DutiesChanged(epoch, duties))
}
/// A future wrapping around `update()`. This will perform logic based upon the update
/// process and complete once the update has completed.
pub fn run_update(&self, epoch: Epoch, log: slog::Logger) -> Result<Async<()>, ()> {
match self.update(epoch) {
Err(error) => error!(log, "Epoch duties poll error"; "error" => format!("{:?}", error)),
Ok(UpdateOutcome::NoChange(epoch)) => {
debug!(log, "No change in duties"; "epoch" => epoch)
}
Ok(UpdateOutcome::DutiesChanged(epoch, duties)) => {
info!(log, "Duties changed (potential re-org)"; "epoch" => epoch, "duties" => format!("{:?}", duties))
}
Ok(UpdateOutcome::NewDuties(epoch, duties)) => {
info!(log, "New duties obtained"; "epoch" => epoch);
print_duties(&log, duties);
}
};
Ok(Async::Ready(()))
}
/// Returns a list of (index, WorkInfo) indicating all the validators that have work to perform
/// at this slot.
pub fn get_current_work(&self, slot: Slot) -> Option<Vec<(usize, WorkInfo)>> {
let mut current_work: Vec<(usize, WorkInfo)> = Vec::new();
// if the map is poisoned, return None
let duties = self.duties_map.read();
for (index, validator_signer) in self.signers.iter().enumerate() {
match duties.is_work_slot(slot, &validator_signer.to_public()) {
Ok(Some(work_type)) => current_work.push((index, work_type)),
Ok(None) => {} // No work for this validator
//TODO: This should really log an error, as we shouldn't end up with an err here.
Err(_) => {} // Unknown epoch or validator, no work
}
}
if current_work.is_empty() {
return None;
}
Some(current_work)
}
}
//TODO: Use error_chain to handle errors
impl From<BeaconNodeDutiesError> for Error {
fn from(e: BeaconNodeDutiesError) -> Error {
Error::BeaconNodeDutiesError(e)
}
}
//TODO: Use error_chain to handle errors
impl<T> From<std::sync::PoisonError<T>> for Error {
fn from(_e: std::sync::PoisonError<T>) -> Error {
Error::DutiesMapPoisoned
}
}
impl From<EpochDutiesMapError> for Error {
fn from(e: EpochDutiesMapError) -> Error {
match e {
EpochDutiesMapError::UnknownEpoch => Error::UnknownEpoch,
EpochDutiesMapError::UnknownValidator => Error::UnknownValidator,
}
}
}
fn print_duties(log: &slog::Logger, duties: EpochDuties) {
for (pk, duty) in duties.iter() {
if let Some(display_duty) = duty {
info!(log, "Validator: {}",pk; "Duty" => format!("{}",display_duty));
} else {
info!(log, "Validator: {}",pk; "Duty" => "None");
}
}
}
/* TODO: Modify tests for new Duties Manager form
#[cfg(test)]
mod tests {
use super::test_node::TestBeaconNode;
use super::*;
use bls::Keypair;
use slot_clock::TestingSlotClock;
use types::Slot;
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160
//
// These tests should serve as a good example for future tests.
#[test]
pub fn polling() {
let spec = Arc::new(ChainSpec::mainnet());
let duties_map = Arc::new(EpochDutiesMap::new(T::slots_per_epoch()));
let keypair = Keypair::random();
let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(TestBeaconNode::default());
let manager = DutiesManager {
spec: spec.clone(),
pubkey: keypair.pk.clone(),
duties_map: duties_map.clone(),
slot_clock: slot_clock.clone(),
beacon_node: beacon_node.clone(),
};
// Configure response from the BeaconNode.
let duties = EpochDuties {
validator_index: 0,
block_production_slot: Some(Slot::new(10)),
};
beacon_node.set_next_shuffling_result(Ok(Some(duties)));
// Get the duties for the first time...
assert_eq!(
manager.poll(),
Ok(PollOutcome::NewDuties(Epoch::new(0), duties))
);
// Get the same duties again...
assert_eq!(manager.poll(), Ok(PollOutcome::NoChange(Epoch::new(0))));
// Return new duties.
let duties = EpochDuties {
validator_index: 0,
block_production_slot: Some(Slot::new(11)),
};
beacon_node.set_next_shuffling_result(Ok(Some(duties)));
assert_eq!(
manager.poll(),
Ok(PollOutcome::DutiesChanged(Epoch::new(0), duties))
);
// Return no duties.
beacon_node.set_next_shuffling_result(Ok(None));
assert_eq!(
manager.poll(),
Ok(PollOutcome::UnknownValidatorOrEpoch(Epoch::new(0)))
);
}
}
*/

View File

@ -1,32 +0,0 @@
use super::traits::{BeaconNode, BeaconNodeError};
use super::EpochDuties;
use bls::PublicKey;
use std::sync::RwLock;
use types::Epoch;
type ShufflingResult = Result<Option<EpochDuties>, BeaconNodeError>;
/// A test-only struct used to simulate a Beacon Node.
#[derive(Default)]
pub struct TestBeaconNode {
pub request_shuffling_input: RwLock<Option<(Epoch, PublicKey)>>,
pub request_shuffling_result: RwLock<Option<ShufflingResult>>,
}
impl TestBeaconNode {
/// Set the result to be returned when `request_shuffling` is called.
pub fn set_next_shuffling_result(&self, result: ShufflingResult) {
*self.request_shuffling_result.write().unwrap() = Some(result);
}
}
impl BeaconNode for TestBeaconNode {
/// Returns the value specified by the `set_next_shuffling_result`.
fn request_shuffling(&self, epoch: Epoch, public_key: &PublicKey) -> ShufflingResult {
*self.request_shuffling_input.write().unwrap() = Some((epoch, public_key.clone()));
match *self.request_shuffling_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("TestBeaconNode: produce_result == None"),
}
}
}

View File

@ -0,0 +1,426 @@
use crate::validator_store::ValidatorStore;
use environment::RuntimeContext;
use exit_future::Signal;
use futures::{Future, IntoFuture, Stream};
use parking_lot::RwLock;
use remote_beacon_node::{RemoteBeaconNode, ValidatorDuty};
use slog::{crit, error, info, trace, warn};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, Epoch, EthSpec, PublicKey, Slot};
/// Delay this period of time after the slot starts. This allows the node to process the new slot.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100);
/// Remove any duties where the `duties_epoch < current_epoch - PRUNE_DEPTH`.
const PRUNE_DEPTH: u64 = 4;
type BaseHashMap = HashMap<PublicKey, HashMap<Epoch, ValidatorDuty>>;
/// The outcome of inserting some `ValidatorDuty` into the `DutiesStore`.
enum InsertOutcome {
/// These are the first duties received for this validator.
NewValidator,
/// The duties for this given epoch were previously unknown and have been stored.
NewEpoch,
/// The duties were identical to some already in the store.
Identical,
/// There were duties for this validator and epoch in the store that were different to the ones
/// provided. The existing duties were replaced.
Replaced,
/// The given duties were invalid.
Invalid,
}
#[derive(Default)]
pub struct DutiesStore {
store: RwLock<BaseHashMap>,
}
impl DutiesStore {
fn block_producers(&self, slot: Slot, slots_per_epoch: u64) -> Vec<PublicKey> {
self.store
.read()
.iter()
// As long as a `HashMap` iterator does not return duplicate keys, neither will this
// function.
.filter_map(|(_validator_pubkey, validator_map)| {
let epoch = slot.epoch(slots_per_epoch);
validator_map.get(&epoch).and_then(|duties| {
if duties.block_proposal_slot == Some(slot) {
Some(duties.validator_pubkey.clone())
} else {
None
}
})
})
.collect()
}
fn attesters(&self, slot: Slot, slots_per_epoch: u64) -> Vec<ValidatorDuty> {
self.store
.read()
.iter()
// As long as a `HashMap` iterator does not return duplicate keys, neither will this
// function.
.filter_map(|(_validator_pubkey, validator_map)| {
let epoch = slot.epoch(slots_per_epoch);
validator_map.get(&epoch).and_then(|duties| {
if duties.attestation_slot == Some(slot) {
Some(duties)
} else {
None
}
})
})
.cloned()
.collect()
}
fn insert(&self, epoch: Epoch, duties: ValidatorDuty, slots_per_epoch: u64) -> InsertOutcome {
let mut store = self.store.write();
if !duties_match_epoch(&duties, epoch, slots_per_epoch) {
return InsertOutcome::Invalid;
}
if let Some(validator_map) = store.get_mut(&duties.validator_pubkey) {
if let Some(known_duties) = validator_map.get_mut(&epoch) {
if *known_duties == duties {
InsertOutcome::Identical
} else {
*known_duties = duties;
InsertOutcome::Replaced
}
} else {
validator_map.insert(epoch, duties);
InsertOutcome::NewEpoch
}
} else {
let validator_pubkey = duties.validator_pubkey.clone();
let mut validator_map = HashMap::new();
validator_map.insert(epoch, duties);
store.insert(validator_pubkey, validator_map);
InsertOutcome::NewValidator
}
}
fn prune(&self, prior_to: Epoch) {
self.store
.write()
.retain(|_validator_pubkey, validator_map| {
validator_map.retain(|epoch, _duties| *epoch >= prior_to);
!validator_map.is_empty()
});
}
}
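// Editor's sketch (not part of this diff): a minimal check of the pruning cut-off used by the
// service below. It only exercises `prune` on an empty store plus the `PRUNE_DEPTH` arithmetic,
// since constructing a full `ValidatorDuty` is beyond this illustration.
#[cfg(test)]
mod duties_store_sketch {
use super::*;
#[test]
fn prune_cutoff_arithmetic() {
let store = DutiesStore::default();
let current_epoch = Epoch::new(10);
// With PRUNE_DEPTH = 4, duties for epochs below 6 would be dropped.
assert_eq!(current_epoch - PRUNE_DEPTH, Epoch::new(6));
// Pruning an empty store is a no-op.
store.prune(current_epoch - PRUNE_DEPTH);
}
}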
pub struct DutiesServiceBuilder<T, E: EthSpec> {
validator_store: Option<ValidatorStore<T, E>>,
slot_clock: Option<T>,
beacon_node: Option<RemoteBeaconNode<E>>,
context: Option<RuntimeContext<E>>,
}
impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> {
pub fn new() -> Self {
Self {
validator_store: None,
slot_clock: None,
beacon_node: None,
context: None,
}
}
pub fn validator_store(mut self, store: ValidatorStore<T, E>) -> Self {
self.validator_store = Some(store);
self
}
pub fn slot_clock(mut self, slot_clock: T) -> Self {
self.slot_clock = Some(slot_clock);
self
}
pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self {
self.beacon_node = Some(beacon_node);
self
}
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
self.context = Some(context);
self
}
pub fn build(self) -> Result<DutiesService<T, E>, String> {
Ok(DutiesService {
inner: Arc::new(Inner {
store: Arc::new(DutiesStore::default()),
validator_store: self
.validator_store
.ok_or_else(|| "Cannot build DutiesService without validator_store")?,
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build DutiesService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build DutiesService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build DutiesService without runtime_context")?,
}),
})
}
}
/// Helper to minimise `Arc` usage.
pub struct Inner<T, E: EthSpec> {
store: Arc<DutiesStore>,
validator_store: ValidatorStore<T, E>,
slot_clock: T,
beacon_node: RemoteBeaconNode<E>,
context: RuntimeContext<E>,
}
/// Maintains a store of the duties for all voting validators in the `validator_store`.
///
/// Polls the beacon node at the start of each slot, collecting duties for the current and next
/// epoch.
pub struct DutiesService<T, E: EthSpec> {
inner: Arc<Inner<T, E>>,
}
impl<T, E: EthSpec> Clone for DutiesService<T, E> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T, E: EthSpec> Deref for DutiesService<T, E> {
type Target = Inner<T, E>;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
/// Returns the pubkeys of the validators which are assigned to propose in the given slot.
///
/// In normal cases, there should be 0 or 1 validators returned.
///
/// It is possible that multiple validators have an identical proposal slot; however, that is
/// likely the result of heavy forking (e.g., a deep re-org) or inconsistent beacon node
/// connections.
pub fn block_producers(&self, slot: Slot) -> Vec<PublicKey> {
self.store.block_producers(slot, E::slots_per_epoch())
}
/// Returns all `ValidatorDuty` for the given `slot`.
pub fn attesters(&self, slot: Slot) -> Vec<ValidatorDuty> {
self.store.attesters(slot, E::slots_per_epoch())
}
/// Start the service that periodically polls the beacon node for validator duties.
pub fn start_update_service(&self, spec: &ChainSpec) -> Result<Signal, String> {
let log = self.context.log.clone();
let duration_to_next_slot = self
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot".to_string())?;
let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new(
Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT,
slot_duration,
)
};
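// Editor's note: the first tick fires TIME_DELAY_FROM_SLOT (100 ms) after the next slot
// boundary, then once every `milliseconds_per_slot` thereafter, so duty polling stays
// slot-aligned.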
let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone();
let log_1 = log.clone();
let log_2 = log.clone();
// Run an immediate update before starting the updater service.
self.context.executor.spawn(service.clone().do_update());
self.context.executor.spawn(
exit_fut
.until(
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| service.clone().do_update())
// Prevent any errors from escaping and stopping the interval.
.then(|_| Ok(())),
)
.map(move |_| info!(log_2, "Shutdown complete")),
);
Ok(exit_signal)
}
/// Attempt to download the duties of all managed validators for this epoch and the next.
fn do_update(&self) -> impl Future<Item = (), Error = ()> {
let service_1 = self.clone();
let service_2 = self.clone();
let service_3 = self.clone();
let log_1 = self.context.log.clone();
let log_2 = self.context.log.clone();
self.slot_clock
.now()
.ok_or_else(move || {
error!(log_1, "Duties manager failed to read slot clock");
})
.into_future()
.map(move |slot| {
let epoch = slot.epoch(E::slots_per_epoch());
if slot % E::slots_per_epoch() == 0 {
let prune_below = epoch - PRUNE_DEPTH;
trace!(
log_2,
"Pruning duties cache";
"pruning_below" => prune_below.as_u64(),
"current_epoch" => epoch.as_u64(),
);
service_1.store.prune(prune_below);
}
epoch
})
.and_then(move |epoch| {
let log = service_2.context.log.clone();
service_2.update_epoch(epoch).then(move |result| {
if let Err(e) = result {
error!(
log,
"Failed to get current epoch duties";
"http_error" => format!("{:?}", e)
);
}
let log = service_3.context.log.clone();
service_3.update_epoch(epoch + 1).map_err(move |e| {
error!(
log,
"Failed to get next epoch duties";
"http_error" => format!("{:?}", e)
);
})
})
})
.map(|_| ())
}
/// Attempt to download the duties of all managed validators for the given `epoch`.
fn update_epoch(self, epoch: Epoch) -> impl Future<Item = (), Error = String> {
let service_1 = self.clone();
let service_2 = self.clone();
let pubkeys = service_1.validator_store.voting_pubkeys();
service_1
.beacon_node
.http
.validator()
.get_duties_bulk(epoch, pubkeys.as_slice())
.map(move |all_duties| (epoch, all_duties))
.map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))
.map(move |(epoch, all_duties)| {
let log = service_2.context.log.clone();
let mut new_validator = 0;
let mut new_epoch = 0;
let mut identical = 0;
let mut replaced = 0;
let mut invalid = 0;
all_duties.into_iter().for_each(|duties| {
match service_2
.store
.insert(epoch, duties.clone(), E::slots_per_epoch())
{
InsertOutcome::NewValidator => {
info!(
log,
"First duty assignment for validator";
"proposal_slot" => format!("{:?}", &duties.block_proposal_slot),
"attestation_slot" => format!("{:?}", &duties.attestation_slot),
"validator" => format!("{:?}", &duties.validator_pubkey)
);
new_validator += 1
}
InsertOutcome::NewEpoch => new_epoch += 1,
InsertOutcome::Identical => identical += 1,
InsertOutcome::Replaced => replaced += 1,
InsertOutcome::Invalid => invalid += 1,
};
});
if invalid > 0 {
error!(
log,
"Received invalid duties from beacon node";
"bad_duty_count" => invalid,
"info" => "Duties are from wrong epoch."
)
}
trace!(
log,
"Performed duties update";
"identical" => identical,
"new_epoch" => new_epoch,
"new_validator" => new_validator,
"replaced" => replaced,
"epoch" => format!("{}", epoch)
);
if replaced > 0 {
warn!(
log,
"Duties changed during routine update";
"info" => "Chain re-org likely occurred."
)
}
})
}
}
/// Returns `true` if the slots in the `duties` are from the given `epoch`
fn duties_match_epoch(duties: &ValidatorDuty, epoch: Epoch, slots_per_epoch: u64) -> bool {
if let Some(attestation_slot) = duties.attestation_slot {
if attestation_slot.epoch(slots_per_epoch) != epoch {
return false;
}
}
if let Some(block_proposal_slot) = duties.block_proposal_slot {
if block_proposal_slot.epoch(slots_per_epoch) != epoch {
return false;
}
}
true
}
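// Editor's sketch (not part of this diff): the epoch check above is plain slot-to-epoch
// arithmetic. With 32 slots per epoch, a duty carrying slot 70 only matches epoch 2.
#[cfg(test)]
mod duties_match_epoch_sketch {
use types::{Epoch, Slot};
#[test]
fn slot_to_epoch_example() {
let slots_per_epoch = 32;
assert_eq!(Slot::new(70).epoch(slots_per_epoch), Epoch::new(2));
assert_ne!(Slot::new(70).epoch(slots_per_epoch), Epoch::new(3));
}
}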

View File

@ -1,12 +0,0 @@
use error_chain::error_chain;
error_chain! {
links { }
errors {
SystemTimeError(t: String ) {
description("Error reading system time"),
display("SystemTimeError: '{}'", t)
}
}
}

View File

@ -0,0 +1,173 @@
use environment::RuntimeContext;
use exit_future::Signal;
use futures::{Future, Stream};
use parking_lot::RwLock;
use remote_beacon_node::RemoteBeaconNode;
use slog::{crit, info, trace};
use slot_clock::SlotClock;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{ChainSpec, EthSpec, Fork};
/// Delay this period of time after the slot starts. This allows the node to process the new slot.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80);
/// Builds a `ForkService`.
pub struct ForkServiceBuilder<T, E: EthSpec> {
fork: Option<Fork>,
slot_clock: Option<T>,
beacon_node: Option<RemoteBeaconNode<E>>,
context: Option<RuntimeContext<E>>,
}
impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> {
pub fn new() -> Self {
Self {
fork: None,
slot_clock: None,
beacon_node: None,
context: None,
}
}
pub fn slot_clock(mut self, slot_clock: T) -> Self {
self.slot_clock = Some(slot_clock);
self
}
pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self {
self.beacon_node = Some(beacon_node);
self
}
pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self {
self.context = Some(context);
self
}
pub fn build(self) -> Result<ForkService<T, E>, String> {
Ok(ForkService {
inner: Arc::new(Inner {
fork: RwLock::new(self.fork),
slot_clock: self
.slot_clock
.ok_or_else(|| "Cannot build ForkService without slot_clock")?,
beacon_node: self
.beacon_node
.ok_or_else(|| "Cannot build ForkService without beacon_node")?,
context: self
.context
.ok_or_else(|| "Cannot build ForkService without runtime_context")?,
}),
})
}
}
/// Helper to minimise `Arc` usage.
pub struct Inner<T, E: EthSpec> {
fork: RwLock<Option<Fork>>,
beacon_node: RemoteBeaconNode<E>,
context: RuntimeContext<E>,
slot_clock: T,
}
/// Attempts to download the `Fork` struct from the beacon node at the start of each epoch.
pub struct ForkService<T, E: EthSpec> {
inner: Arc<Inner<T, E>>,
}
impl<T, E: EthSpec> Clone for ForkService<T, E> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T, E: EthSpec> Deref for ForkService<T, E> {
type Target = Inner<T, E>;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
/// Returns the last fork downloaded from the beacon node, if any.
pub fn fork(&self) -> Option<Fork> {
self.fork.read().clone()
}
/// Starts the service that periodically polls for the `Fork`.
pub fn start_update_service(&self, spec: &ChainSpec) -> Result<Signal, String> {
let log = self.context.log.clone();
let duration_to_next_epoch = self
.slot_clock
.duration_to_next_epoch(E::slots_per_epoch())
.ok_or_else(|| "Unable to determine duration to next epoch".to_string())?;
let interval = {
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
Interval::new(
Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT,
slot_duration * E::slots_per_epoch() as u32,
)
};
let (exit_signal, exit_fut) = exit_future::signal();
let service = self.clone();
let log_1 = log.clone();
let log_2 = log.clone();
// Run an immediate update before starting the updater service.
self.context.executor.spawn(service.clone().do_update());
self.context.executor.spawn(
exit_fut
.until(
interval
.map_err(move |e| {
crit! {
log_1,
"Timer thread failed";
"error" => format!("{}", e)
}
})
.for_each(move |_| service.do_update())
// Prevent any errors from escaping and stopping the interval.
.then(|_| Ok(())),
)
.map(move |_| info!(log_2, "Shutdown complete")),
);
Ok(exit_signal)
}
/// Attempts to download the `Fork` from the server.
fn do_update(&self) -> impl Future<Item = (), Error = ()> {
let service_1 = self.clone();
let log_1 = service_1.context.log.clone();
let log_2 = service_1.context.log.clone();
self.inner
.beacon_node
.http
.beacon()
.get_fork()
.map(move |fork| *(service_1.fork.write()) = Some(fork))
.map(move |_| trace!(log_1, "Fork update success"))
.map_err(move |e| {
trace!(
log_2,
"Fork update failed";
"error" => format!("Error retrieving fork: {:?}", e)
)
})
// Returning an error will stop the interval. This is not desired, a single failure
// should not stop all future attempts.
.then(|_| Ok(()))
}
}
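// Editor's sketch (not part of this diff): `fork()` returns `None` until the first successful
// poll, so callers should treat a missing fork as "not yet available" rather than an error:
//
//     if let Some(fork) = fork_service.fork() {
//         // safe to compute signing domains with `fork`
//     } else {
//         // skip signing for now; the next epoch-aligned poll will retry
//     }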

View File

@ -1,258 +1,266 @@
mod attestation_producer;
mod block_producer;
mod attestation_service;
mod block_service;
mod cli;
mod config;
mod duties;
mod error;
mod service;
mod signer;
mod duties_service;
mod fork_service;
mod validator_store;
pub mod validator_directory;
pub use cli::cli_app;
pub use config::Config;
pub use config::{Config, KeySource};
use attestation_service::{AttestationService, AttestationServiceBuilder};
use block_service::{BlockService, BlockServiceBuilder};
use clap::ArgMatches;
use config::{Config as ClientConfig, KeySource};
use duties_service::{DutiesService, DutiesServiceBuilder};
use environment::RuntimeContext;
use eth2_config::Eth2Config;
use exit_future::Signal;
use futures::Stream;
use lighthouse_bootstrap::Bootstrapper;
use parking_lot::RwLock;
use protos::services_grpc::ValidatorServiceClient;
use service::Service;
use slog::{error, info, warn, Logger};
use fork_service::{ForkService, ForkServiceBuilder};
use futures::{
future::{self, loop_fn, Loop},
Future, IntoFuture,
};
use remote_beacon_node::RemoteBeaconNode;
use slog::{error, info, Logger};
use slot_clock::SlotClock;
use std::path::PathBuf;
use std::sync::Arc;
use slot_clock::SystemTimeSlotClock;
use std::time::{Duration, Instant};
use tokio::timer::Interval;
use types::{EthSpec, Keypair};
use tokio::timer::Delay;
use types::EthSpec;
use validator_store::ValidatorStore;
/// A fixed amount of time after a slot to perform operations. This gives the node time to complete
/// per-slot processes.
const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100);
/// The interval between attempts to contact the beacon node during startup.
const RETRY_DELAY: Duration = Duration::from_secs(2);
/// The global timeout for HTTP requests to the beacon node.
const HTTP_TIMEOUT: Duration = Duration::from_secs(12);
#[derive(Clone)]
pub struct ProductionValidatorClient<T: EthSpec> {
context: RuntimeContext<T>,
service: Arc<Service<ValidatorServiceClient, Keypair, T>>,
exit_signals: Arc<RwLock<Vec<Signal>>>,
duties_service: DutiesService<SystemTimeSlotClock, T>,
fork_service: ForkService<SystemTimeSlotClock, T>,
block_service: BlockService<SystemTimeSlotClock, T>,
attestation_service: AttestationService<SystemTimeSlotClock, T>,
exit_signals: Vec<Signal>,
}
impl<T: EthSpec> ProductionValidatorClient<T> {
/// Instantiates the validator client, _without_ starting the timers to trigger block
/// and attestation production.
pub fn new_from_cli(context: RuntimeContext<T>, matches: &ArgMatches) -> Result<Self, String> {
let mut log = context.log.clone();
let (client_config, eth2_config) = get_configs(&matches, &mut log)
.map_err(|e| format!("Unable to initialize config: {}", e))?;
info!(
log,
"Starting validator client";
"datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(),
);
let service: Service<ValidatorServiceClient, Keypair, T> =
Service::initialize_service(client_config, eth2_config, log.clone())
.map_err(|e| e.to_string())?;
Ok(Self {
context,
service: Arc::new(service),
exit_signals: Arc::new(RwLock::new(vec![])),
})
pub fn new_from_cli(
context: RuntimeContext<T>,
cli_args: &ArgMatches,
) -> impl Future<Item = Self, Error = String> {
Config::from_cli(&cli_args)
.into_future()
.map_err(|e| format!("Unable to initialize config: {}", e))
.and_then(|config| Self::new(context, config))
}
/// Starts the timers to trigger block and attestation production.
pub fn start_service(&self) -> Result<(), String> {
let service = self.clone().service;
let log = self.context.log.clone();
let duration_to_next_slot = service
.slot_clock
.duration_to_next_slot()
.ok_or_else(|| "Unable to determine duration to next slot. Exiting.".to_string())?;
// set up the validator work interval - start at next slot and proceed every slot
let interval = {
// Set the interval to start at the next slot, and every slot after
let slot_duration = Duration::from_millis(service.spec.milliseconds_per_slot);
//TODO: Handle checked add correctly
Interval::new(Instant::now() + duration_to_next_slot, slot_duration)
};
if service.slot_clock.now().is_none() {
warn!(
log,
"Starting node prior to genesis";
);
}
/// Instantiates the validator client, _without_ starting the timers to trigger block
/// and attestation production.
pub fn new(
mut context: RuntimeContext<T>,
config: Config,
) -> impl Future<Item = Self, Error = String> {
let log_1 = context.log.clone();
let log_2 = context.log.clone();
let log_3 = context.log.clone();
info!(
log,
"Waiting for next slot";
"seconds_to_wait" => duration_to_next_slot.as_secs()
log_1,
"Starting validator client";
"beacon_node" => &config.http_server,
"datadir" => format!("{:?}", config.data_dir),
);
let (exit_signal, exit_fut) = exit_future::signal();
RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT)
.map_err(|e| format!("Unable to init beacon node http client: {}", e))
.into_future()
.and_then(move |beacon_node| wait_for_node(beacon_node, log_2))
.and_then(|beacon_node| {
beacon_node
.http
.spec()
.get_eth2_config()
.map(|eth2_config| (beacon_node, eth2_config))
.map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))
})
.and_then(|(beacon_node, eth2_config)| {
beacon_node
.http
.beacon()
.get_genesis_time()
.map(|genesis_time| (beacon_node, eth2_config, genesis_time))
.map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))
})
.and_then(move |(beacon_node, remote_eth2_config, genesis_time)| {
// Do not permit a connection to a beacon node using different spec constants.
if context.eth2_config.spec_constants != remote_eth2_config.spec_constants {
return Err(format!(
"Beacon node is using an incompatible spec. Got {}, expected {}",
remote_eth2_config.spec_constants, context.eth2_config.spec_constants
));
}
self.exit_signals.write().push(exit_signal);
// Note: here we just assume the spec variables of the remote node. This is very useful
// for testnets, but perhaps a security issue when it comes to mainnet.
//
// A damaging attack would be for a beacon node to convince the validator client of a
// different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being
// produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant
// for Lighthouse.
context.eth2_config = remote_eth2_config;
/* kick off the core service */
self.context.executor.spawn(
interval
.map_err(move |e| {
error! {
log,
"Timer thread failed";
"error" => format!("{}", e)
}
let slot_clock = SystemTimeSlotClock::new(
context.eth2_config.spec.genesis_slot,
Duration::from_secs(genesis_time),
Duration::from_millis(context.eth2_config.spec.milliseconds_per_slot),
);
let fork_service = ForkServiceBuilder::new()
.slot_clock(slot_clock.clone())
.beacon_node(beacon_node.clone())
.runtime_context(context.service_context("fork"))
.build()?;
let validator_store: ValidatorStore<SystemTimeSlotClock, T> =
match &config.key_source {
// Load pre-existing validators from the data dir.
//
// Use the `account_manager` to generate these files.
KeySource::Disk => ValidatorStore::load_from_disk(
config.data_dir.clone(),
context.eth2_config.spec.clone(),
fork_service.clone(),
log_3.clone(),
)?,
// Generate ephemeral insecure keypairs for testing purposes.
//
// Do not use in production.
KeySource::InsecureKeypairs(indices) => {
ValidatorStore::insecure_ephemeral_validators(
&indices,
context.eth2_config.spec.clone(),
fork_service.clone(),
log_3.clone(),
)?
}
};
info!(
log_3,
"Loaded validator keypair store";
"voting_validators" => validator_store.num_voting_validators()
);
let duties_service = DutiesServiceBuilder::new()
.slot_clock(slot_clock.clone())
.validator_store(validator_store.clone())
.beacon_node(beacon_node.clone())
.runtime_context(context.service_context("duties"))
.build()?;
let block_service = BlockServiceBuilder::new()
.duties_service(duties_service.clone())
.slot_clock(slot_clock.clone())
.validator_store(validator_store.clone())
.beacon_node(beacon_node.clone())
.runtime_context(context.service_context("block"))
.build()?;
let attestation_service = AttestationServiceBuilder::new()
.duties_service(duties_service.clone())
.slot_clock(slot_clock)
.validator_store(validator_store)
.beacon_node(beacon_node)
.runtime_context(context.service_context("attestation"))
.build()?;
Ok(Self {
context,
duties_service,
fork_service,
block_service,
attestation_service,
exit_signals: vec![],
})
.and_then(move |_| if exit_fut.is_live() { Ok(()) } else { Err(()) })
.for_each(move |_| {
// wait for node to process
std::thread::sleep(TIME_DELAY_FROM_SLOT);
// if a non-fatal error occurs, proceed to the next slot.
let _ignore_error = service.per_slot_execution();
// completed a slot process
Ok(())
}),
);
})
}
pub fn start_service(&mut self) -> Result<(), String> {
let duties_exit = self
.duties_service
.start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start duties service: {}", e))?;
let fork_exit = self
.fork_service
.start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start fork service: {}", e))?;
let block_exit = self
.block_service
.start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start block service: {}", e))?;
let attestation_exit = self
.attestation_service
.start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start attestation service: {}", e))?;
self.exit_signals = vec![duties_exit, fork_exit, block_exit, attestation_exit];
Ok(())
}
}
/// Parses the CLI arguments and attempts to load the client and eth2 configuration.
///
/// This is not a pure function, it reads from disk and may contact network servers.
fn get_configs(
cli_args: &ArgMatches,
mut log: &mut Logger,
) -> Result<(ClientConfig, Eth2Config), String> {
let mut client_config = ClientConfig::default();
/// Request the version from the node, looping back and trying again on failure. Exit once the node
/// has been contacted.
fn wait_for_node<E: EthSpec>(
beacon_node: RemoteBeaconNode<E>,
log: Logger,
) -> impl Future<Item = RemoteBeaconNode<E>, Error = String> {
// Try to get the version string from the node, looping until success is returned.
loop_fn(beacon_node.clone(), move |beacon_node| {
let log = log.clone();
beacon_node
.clone()
.http
.node()
.get_version()
.map_err(|e| format!("{:?}", e))
.then(move |result| {
let future: Box<dyn Future<Item = Loop<_, _>, Error = String> + Send> = match result
{
Ok(version) => {
info!(
log,
"Connected to beacon node";
"version" => version,
);
client_config.apply_cli_args(&cli_args, &mut log)?;
Box::new(future::ok(Loop::Break(beacon_node)))
}
Err(e) => {
error!(
log,
"Unable to connect to beacon node";
"error" => format!("{:?}", e),
);
if let Some(server) = cli_args.value_of("server") {
client_config.server = server.to_string();
}
Box::new(
Delay::new(Instant::now() + RETRY_DELAY)
.map_err(|e| format!("Failed to trigger delay: {:?}", e))
.and_then(|_| future::ok(Loop::Continue(beacon_node))),
)
}
};
if let Some(port) = cli_args.value_of("server-http-port") {
client_config.server_http_port = port
.parse::<u16>()
.map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?;
}
if let Some(port) = cli_args.value_of("server-grpc-port") {
client_config.server_grpc_port = port
.parse::<u16>()
.map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?;
}
info!(
*log,
"Beacon node connection info";
"grpc_port" => client_config.server_grpc_port,
"http_port" => client_config.server_http_port,
"server" => &client_config.server,
);
let (client_config, eth2_config) = match cli_args.subcommand() {
("testnet", Some(sub_cli_args)) => {
if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") {
return Err(
"Cannot specify --eth2-config and --bootstrap as it may result \
in ambiguity."
.into(),
);
}
process_testnet_subcommand(sub_cli_args, client_config, log)
}
_ => return Err("You must use the testnet command. See '--help'.".into()),
}?;
Ok((client_config, eth2_config))
}
/// Parses the `testnet` CLI subcommand.
///
/// This is not a pure function, it reads from disk and may contact network servers.
fn process_testnet_subcommand(
cli_args: &ArgMatches,
mut client_config: ClientConfig,
log: &Logger,
) -> Result<(ClientConfig, Eth2Config), String> {
let eth2_config = if cli_args.is_present("bootstrap") {
info!(log, "Connecting to bootstrap server");
let bootstrapper = Bootstrapper::connect(
format!(
"http://{}:{}",
client_config.server, client_config.server_http_port
),
&log,
)?;
let eth2_config = bootstrapper.eth2_config()?;
info!(
log,
"Bootstrapped eth2 config via HTTP";
"slot_time_millis" => eth2_config.spec.milliseconds_per_slot,
"spec" => &eth2_config.spec_constants,
);
eth2_config
} else {
match cli_args.value_of("spec") {
Some("mainnet") => Eth2Config::mainnet(),
Some("minimal") => Eth2Config::minimal(),
Some("interop") => Eth2Config::interop(),
_ => return Err("No --spec flag provided. See '--help'.".into()),
}
};
client_config.key_source = match cli_args.subcommand() {
("insecure", Some(sub_cli_args)) => {
let first = sub_cli_args
.value_of("first_validator")
.ok_or_else(|| "No first validator supplied")?
.parse::<usize>()
.map_err(|e| format!("Unable to parse first validator: {:?}", e))?;
let count = sub_cli_args
.value_of("validator_count")
.ok_or_else(|| "No validator count supplied")?
.parse::<usize>()
.map_err(|e| format!("Unable to parse validator count: {:?}", e))?;
info!(
log,
"Generating unsafe testing keys";
"first_validator" => first,
"count" => count
);
KeySource::TestingKeypairRange(first..first + count)
}
("interop-yaml", Some(sub_cli_args)) => {
let path = sub_cli_args
.value_of("path")
.ok_or_else(|| "No yaml path supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Unable to parse yaml path: {:?}", e))?;
info!(
log,
"Loading keypairs from interop YAML format";
"path" => format!("{:?}", path),
);
KeySource::YamlKeypairs(path)
}
_ => KeySource::Disk,
};
Ok((client_config, eth2_config))
future
})
})
.map(|_| beacon_node)
}
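// Editor's sketch (not part of this diff): the intended startup flow for the refactored client,
// assuming a tokio runtime drives the returned future:
//
//     ProductionValidatorClient::new(context, config).and_then(|mut client| {
//         // Spawns the duties, fork, block and attestation timers and keeps their exit signals.
//         client.start_service()?;
//         Ok(client)
//     })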

View File

@ -1,363 +0,0 @@
/// The Validator Client service.
///
/// Connects to a beacon node and negotiates the correct chain id.
///
/// Once connected, the service loads known validators keypairs from disk. Every slot,
/// the service pings the beacon node, asking for new duties for each of the validators.
///
/// When a validator needs to either produce a block or sign an attestation, it requests the
/// data from the beacon node and performs the signing before publishing the block to the beacon
/// node.
use crate::attestation_producer::AttestationProducer;
use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer};
use crate::config::Config as ValidatorConfig;
use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap};
use crate::error as error_chain;
use crate::signer::Signer;
use bls::Keypair;
use eth2_config::Eth2Config;
use grpcio::{ChannelBuilder, EnvBuilder};
use parking_lot::RwLock;
use protos::services::Empty;
use protos::services_grpc::{
AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient,
ValidatorServiceClient,
};
use slog::{crit, error, info, trace, warn};
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
use types::{ChainSpec, Epoch, EthSpec, Fork, Slot};
/// The validator service. This is the main thread that executes and maintains validator
/// duties.
//TODO: Generalize the BeaconNode types to use testing
pub struct Service<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> {
/// The node's current fork version we are processing on.
fork: Fork,
/// The slot clock for this service.
pub slot_clock: SystemTimeSlotClock,
/// The slot that is currently, or was previously processed by the service.
current_slot: RwLock<Option<Slot>>,
slots_per_epoch: u64,
/// The chain specification for this clients instance.
pub spec: Arc<ChainSpec>,
/// The duties manager which maintains the state of when to perform actions.
duties_manager: Arc<DutiesManager<B, S>>,
// GRPC Clients
/// The beacon block GRPC client.
beacon_block_client: Arc<BeaconBlockGrpcClient>,
/// The attester GRPC client.
attestation_client: Arc<AttestationServiceClient>,
/// The validator client logger.
log: slog::Logger,
_phantom: PhantomData<E>,
}
impl<E: EthSpec> Service<ValidatorServiceClient, Keypair, E> {
/// Initial connection to the beacon node to determine its properties.
///
/// This tries to connect to a beacon node. Once connected, it initialises the gRPC clients
/// and returns an instance of the service.
pub fn initialize_service(
client_config: ValidatorConfig,
eth2_config: Eth2Config,
log: slog::Logger,
) -> error_chain::Result<Service<ValidatorServiceClient, Keypair, E>> {
let server_url = format!(
"{}:{}",
client_config.server, client_config.server_grpc_port
);
let env = Arc::new(EnvBuilder::new().build());
// Beacon node gRPC beacon node endpoints.
let beacon_node_client = {
let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
BeaconNodeServiceClient::new(ch)
};
// retrieve node information and validate the beacon node
let node_info = loop {
match beacon_node_client.info(&Empty::new()) {
Err(e) => {
let retry_seconds = 5;
warn!(
log,
"Could not connect to beacon node";
"error" => format!("{:?}", e),
"retry_in" => format!("{} seconds", retry_seconds),
);
std::thread::sleep(Duration::from_secs(retry_seconds));
continue;
}
Ok(info) => {
// verify the node's network id
if eth2_config.spec.network_id != info.network_id as u8 {
error!(
log,
"Beacon Node's genesis time is in the future. No work to do.\n Exiting"
);
return Err(format!("Beacon node has the wrong chain id. Expected chain id: {}, node's chain id: {}", eth2_config.spec.network_id, info.network_id).into());
}
break info;
}
};
};
// build requisite objects to form Self
let genesis_time = node_info.get_genesis_time();
let genesis_slot = Slot::from(node_info.get_genesis_slot());
info!(
log,
"Beacon node connected";
"version" => node_info.version.clone(),
"network_id" => node_info.network_id,
"genesis_time" => genesis_time
);
let proto_fork = node_info.get_fork();
let mut previous_version: [u8; 4] = [0; 4];
let mut current_version: [u8; 4] = [0; 4];
previous_version.copy_from_slice(&proto_fork.get_previous_version()[..4]);
current_version.copy_from_slice(&proto_fork.get_current_version()[..4]);
let fork = Fork {
previous_version,
current_version,
epoch: Epoch::from(proto_fork.get_epoch()),
};
// initialize the RPC clients
// Beacon node gRPC beacon block endpoints.
let beacon_block_client = {
let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
let beacon_block_service_client = Arc::new(BeaconBlockServiceClient::new(ch));
// a wrapper around the service client to implement the beacon block node trait
Arc::new(BeaconBlockGrpcClient::new(beacon_block_service_client))
};
// Beacon node gRPC validator endpoints.
let validator_client = {
let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
Arc::new(ValidatorServiceClient::new(ch))
};
//Beacon node gRPC attester endpoints.
let attestation_client = {
let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
Arc::new(AttestationServiceClient::new(ch))
};
// build the validator slot clock
let slot_clock = SystemTimeSlotClock::new(
genesis_slot,
Duration::from_secs(genesis_time),
Duration::from_millis(eth2_config.spec.milliseconds_per_slot),
);
/* Generate the duties manager */
// Load generated keypairs
let keypairs = Arc::new(client_config.fetch_keys(&log)?);
let slots_per_epoch = E::slots_per_epoch();
// TODO: keypairs are randomly generated; they should be loaded from a file or generated.
// https://github.com/sigp/lighthouse/issues/160
//let keypairs = Arc::new(generate_deterministic_keypairs(8));
// Builds a mapping of Epoch -> Map(PublicKey, EpochDuty)
// where EpochDuty contains slot numbers and attestation data that each validator needs to
// produce work on.
let duties_map = RwLock::new(EpochDutiesMap::new(slots_per_epoch));
// builds a manager which maintains the list of current duties for all known validators
// and can check when a validator needs to perform a task.
let duties_manager = Arc::new(DutiesManager {
duties_map,
// these are abstract objects capable of signing
signers: keypairs,
beacon_node: validator_client,
});
let spec = Arc::new(eth2_config.spec);
Ok(Service {
fork,
slot_clock,
current_slot: RwLock::new(None),
slots_per_epoch,
spec,
duties_manager,
beacon_block_client,
attestation_client,
log,
_phantom: PhantomData,
})
}
}
impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E> {
/// The execution logic that runs every slot.
// Errors are logged to output, and core execution continues unless fatal errors occur.
pub fn per_slot_execution(&self) -> error_chain::Result<()> {
/* get the new current slot and epoch */
self.update_current_slot()?;
/* check for new duties */
self.check_for_duties();
/* process any required duties for validators */
self.process_duties();
trace!(
self.log,
"Per slot execution finished";
);
Ok(())
}
/// Updates the known current slot and epoch.
fn update_current_slot(&self) -> error_chain::Result<()> {
let wall_clock_slot = self
.slot_clock
.now()
.ok_or_else::<error_chain::Error, _>(|| {
"Genesis is not in the past. Exiting.".into()
})?;
let wall_clock_epoch = wall_clock_slot.epoch(self.slots_per_epoch);
let mut current_slot = self.current_slot.write();
// This is a non-fatal error. If the slot clock repeats, the node could
// have been slow to process the previous slot and is now duplicating tasks.
// We skip the duplicated slot but raise a critical error.
if let Some(current_slot) = *current_slot {
if wall_clock_slot <= current_slot {
crit!(
self.log,
"The validator tried to duplicate a slot. Likely missed the previous slot"
);
return Err("Duplicate slot".into());
}
}
*current_slot = Some(wall_clock_slot);
info!(self.log, "Processing"; "slot" => wall_clock_slot.as_u64(), "epoch" => wall_clock_epoch.as_u64());
Ok(())
}
/// For all known validator keypairs, update any known duties from the beacon node.
fn check_for_duties(&self) {
let cloned_manager = self.duties_manager.clone();
let cloned_log = self.log.clone();
let current_epoch = self
.current_slot
.read()
.expect("The current slot must be updated before checking for duties")
.epoch(self.slots_per_epoch);
trace!(
self.log,
"Checking for duties";
"epoch" => current_epoch
);
// spawn a new thread separate to the runtime
// TODO: Handle thread termination/timeout
// TODO: Add duties thread back in, with channel to process duties in duty change.
// leave sequential for now.
//std::thread::spawn(move || {
// the return value is a future which returns ready.
// built to be compatible with the tokio runtime.
let _empty = cloned_manager.run_update(current_epoch, cloned_log.clone());
//});
}
/// If there are any duties to process, spawn a separate thread and perform required actions.
fn process_duties(&self) {
if let Some(work) = self.duties_manager.get_current_work(
self.current_slot
.read()
.expect("The current slot must be updated before processing duties"),
) {
trace!(
self.log,
"Processing duties";
"work_items" => work.len()
);
for (signer_index, work_type) in work {
if work_type.produce_block {
// we need to produce a block
// spawns a thread to produce a beacon block
let signers = self.duties_manager.signers.clone(); // this is an arc
let fork = self.fork.clone();
let slot = self
.current_slot
.read()
.expect("The current slot must be updated before processing duties");
let spec = self.spec.clone();
let beacon_node = self.beacon_block_client.clone();
let log = self.log.clone();
let slots_per_epoch = self.slots_per_epoch;
std::thread::spawn(move || {
info!(
log,
"Producing a block";
"validator"=> format!("{}", signers[signer_index]),
"slot"=> slot
);
let signer = &signers[signer_index];
let mut block_producer = BlockProducer {
fork,
slot,
spec,
beacon_node,
signer,
slots_per_epoch,
_phantom: PhantomData::<E>,
log,
};
block_producer.handle_produce_block();
});
}
if work_type.attestation_duty.is_some() {
// we need to produce an attestation
// spawns a thread to produce and sign an attestation
let slot = self
.current_slot
.read()
.expect("The current slot must be updated before processing duties");
let signers = self.duties_manager.signers.clone(); // this is an arc
let fork = self.fork.clone();
let spec = self.spec.clone();
let beacon_node = self.attestation_client.clone();
let log = self.log.clone();
let slots_per_epoch = self.slots_per_epoch;
std::thread::spawn(move || {
info!(
log,
"Producing an attestation";
"validator"=> format!("{}", signers[signer_index]),
"slot"=> slot
);
let signer = &signers[signer_index];
let mut attestation_producer = AttestationProducer {
fork,
duty: work_type.attestation_duty.expect("Should never be none"),
spec,
beacon_node,
signer,
slots_per_epoch,
_phantom: PhantomData::<E>,
};
attestation_producer.handle_produce_attestation(log);
});
}
}
}
}
}

View File

@ -1,21 +0,0 @@
use std::fmt::Display;
use types::{Keypair, PublicKey, Signature};
/// Signs message using an internally-maintained private key.
pub trait Signer: Display + Send + Sync + Clone {
fn sign_message(&self, message: &[u8], domain: u64) -> Option<Signature>;
/// Returns a public key for the signer object.
fn to_public(&self) -> PublicKey;
}
/* Implements Display and Signer for Keypair */
impl Signer for Keypair {
fn to_public(&self) -> PublicKey {
self.pk.clone()
}
fn sign_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.sk))
}
}
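// Editor's sketch (not part of this diff): the `Signer` trait lets the duties manager hold any
// signing backend, not just a raw `Keypair`. `LabelledSigner` below is a hypothetical wrapper
// used purely as an illustration.
#[derive(Clone)]
pub struct LabelledSigner {
pub label: String,
pub keypair: Keypair,
}
impl Display for LabelledSigner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Log the human-readable label rather than the raw public key.
write!(f, "{}", self.label)
}
}
impl Signer for LabelledSigner {
fn to_public(&self) -> PublicKey {
self.keypair.pk.clone()
}
fn sign_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
// Delegate to the wrapped keypair, exactly like the `Keypair` impl above.
Some(Signature::new(message, domain, &self.keypair.sk))
}
}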

View File

@ -0,0 +1,404 @@
use bls::get_withdrawal_credentials;
use deposit_contract::eth1_tx_data;
use hex;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::os::unix::fs::PermissionsExt;
use std::path::PathBuf;
use types::{
test_utils::generate_deterministic_keypair, ChainSpec, DepositData, Hash256, Keypair,
PublicKey, SecretKey, Signature,
};
const VOTING_KEY_PREFIX: &str = "voting";
const WITHDRAWAL_KEY_PREFIX: &str = "withdrawal";
const ETH1_DEPOSIT_DATA_FILE: &str = "eth1_deposit_data.rlp";
/// Returns the filename of a keypair file.
fn keypair_file(prefix: &str) -> String {
format!("{}_keypair", prefix)
}
/// Returns the name of the folder to be generated for a validator with the given voting key.
fn dir_name(voting_pubkey: &PublicKey) -> String {
format!("0x{}", hex::encode(voting_pubkey.as_ssz_bytes()))
}
/// Represents the files/objects for each dedicated lighthouse validator directory.
///
/// Generally lives in `~/.lighthouse/validators/`.
#[derive(Debug, Clone, PartialEq)]
pub struct ValidatorDirectory {
pub directory: PathBuf,
pub voting_keypair: Option<Keypair>,
pub withdrawal_keypair: Option<Keypair>,
pub deposit_data: Option<Vec<u8>>,
}
impl ValidatorDirectory {
/// Attempts to load a validator from the given directory, requiring only components necessary
/// for signing messages.
pub fn load_for_signing(directory: PathBuf) -> Result<Self, String> {
if !directory.exists() {
return Err(format!(
"Validator directory does not exist: {:?}",
directory
));
}
Ok(Self {
voting_keypair: Some(
load_keypair(directory.clone(), VOTING_KEY_PREFIX)
.map_err(|e| format!("Unable to get voting keypair: {}", e))?,
),
withdrawal_keypair: load_keypair(directory.clone(), WITHDRAWAL_KEY_PREFIX).ok(),
deposit_data: load_eth1_deposit_data(directory.clone()).ok(),
directory,
})
}
}
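// Editor's sketch (not part of this diff): loading a previously-created validator for signing.
// The path below is hypothetical:
//
//     let dir = ValidatorDirectory::load_for_signing(PathBuf::from(
//         "/home/user/.lighthouse/validators/0x…",
//     ))?;
//     let voting_keypair = dir.voting_keypair.expect("required for signing");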
/// Load a `Keypair` from a file.
fn load_keypair(base_path: PathBuf, file_prefix: &str) -> Result<Keypair, String> {
let path = base_path.join(keypair_file(file_prefix));
if !path.exists() {
return Err(format!("Keypair file does not exist: {:?}", path));
}
let mut bytes = vec![];
File::open(&path)
.map_err(|e| format!("Unable to open keypair file: {}", e))?
.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read keypair file: {}", e))?;
SszEncodableKeypair::from_ssz_bytes(&bytes)
.map(Into::into)
.map_err(|e| format!("Unable to decode keypair: {:?}", e))
}
/// Load eth1_deposit_data from file.
fn load_eth1_deposit_data(base_path: PathBuf) -> Result<Vec<u8>, String> {
let path = base_path.join(ETH1_DEPOSIT_DATA_FILE);
if !path.exists() {
return Err(format!("Eth1 deposit data file does not exist: {:?}", path));
}
let mut bytes = vec![];
File::open(&path)
.map_err(|e| format!("Unable to open eth1 deposit data file: {}", e))?
.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read eth1 deposit data file: {}", e))?;
let string = String::from_utf8_lossy(&bytes);
if string.starts_with("0x") {
hex::decode(&string[2..])
.map_err(|e| format!("Unable to decode eth1 data file as hex: {}", e))
} else {
Err(format!("String did not start with 0x: {}", string))
}
}
/// A helper struct to allow SSZ enc/dec for a `Keypair`.
#[derive(Encode, Decode)]
struct SszEncodableKeypair {
pk: PublicKey,
sk: SecretKey,
}
impl Into<Keypair> for SszEncodableKeypair {
fn into(self) -> Keypair {
Keypair {
sk: self.sk,
pk: self.pk,
}
}
}
impl From<Keypair> for SszEncodableKeypair {
fn from(kp: Keypair) -> Self {
Self {
sk: kp.sk,
pk: kp.pk,
}
}
}
/// Builds a `ValidatorDirectory`, both in-memory and on-disk.
#[derive(Default)]
pub struct ValidatorDirectoryBuilder {
directory: Option<PathBuf>,
voting_keypair: Option<Keypair>,
withdrawal_keypair: Option<Keypair>,
amount: Option<u64>,
deposit_data: Option<Vec<u8>>,
spec: Option<ChainSpec>,
}
impl ValidatorDirectoryBuilder {
pub fn spec(mut self, spec: ChainSpec) -> Self {
self.spec = Some(spec);
self
}
pub fn full_deposit_amount(mut self) -> Result<Self, String> {
let spec = self
.spec
.as_ref()
.ok_or_else(|| "full_deposit_amount requires a spec")?;
self.amount = Some(spec.max_effective_balance);
Ok(self)
}
pub fn custom_deposit_amount(mut self, gwei: u64) -> Self {
self.amount = Some(gwei);
self
}
pub fn thread_random_keypairs(mut self) -> Self {
self.voting_keypair = Some(Keypair::random());
self.withdrawal_keypair = Some(Keypair::random());
self
}
pub fn insecure_keypairs(mut self, index: usize) -> Self {
let keypair = generate_deterministic_keypair(index);
self.voting_keypair = Some(keypair.clone());
self.withdrawal_keypair = Some(keypair);
self
}
/// Creates a validator directory in the given `base_path` (e.g., `~/.lighthouse/validators/`).
pub fn create_directory(mut self, base_path: PathBuf) -> Result<Self, String> {
let voting_keypair = self
.voting_keypair
.as_ref()
.ok_or_else(|| "directory requires a voting_keypair")?;
let directory = base_path.join(dir_name(&voting_keypair.pk));
if directory.exists() {
return Err(format!(
"Validator directory already exists: {:?}",
directory
));
}
fs::create_dir_all(&directory)
.map_err(|e| format!("Unable to create validator directory: {}", e))?;
self.directory = Some(directory);
Ok(self)
}
pub fn write_keypair_files(self) -> Result<Self, String> {
let voting_keypair = self
.voting_keypair
.clone()
.ok_or_else(|| "write_keypair_files requires a voting_keypair")?;
let withdrawal_keypair = self
.withdrawal_keypair
.clone()
.ok_or_else(|| "write_keypair_files requires a withdrawal_keypair")?;
self.save_keypair(voting_keypair, VOTING_KEY_PREFIX)?;
self.save_keypair(withdrawal_keypair, WITHDRAWAL_KEY_PREFIX)?;
Ok(self)
}
fn save_keypair(&self, keypair: Keypair, file_prefix: &str) -> Result<(), String> {
let path = self
.directory
.as_ref()
.map(|directory| directory.join(keypair_file(file_prefix)))
.ok_or_else(|| "save_keypair requires a directory")?;
if path.exists() {
return Err(format!("Keypair file already exists at: {:?}", path));
}
let mut file = File::create(&path).map_err(|e| format!("Unable to create file: {}", e))?;
// Ensure file has correct permissions.
let mut perm = file
.metadata()
.map_err(|e| format!("Unable to get file metadata: {}", e))?
.permissions();
perm.set_mode((libc::S_IWUSR | libc::S_IRUSR) as u32);
file.set_permissions(perm)
.map_err(|e| format!("Unable to set file permissions: {}", e))?;
file.write_all(&SszEncodableKeypair::from(keypair).as_ssz_bytes())
.map_err(|e| format!("Unable to write keypair to file: {}", e))?;
Ok(())
}
pub fn write_eth1_data_file(mut self) -> Result<Self, String> {
let voting_keypair = self
.voting_keypair
.as_ref()
.ok_or_else(|| "write_eth1_data_file requires a voting_keypair")?;
let withdrawal_keypair = self
.withdrawal_keypair
.as_ref()
.ok_or_else(|| "write_eth1_data_file requires a withdrawal_keypair")?;
let amount = self
.amount
.ok_or_else(|| "write_eth1_data_file requires an amount")?;
let spec = self.spec.as_ref().ok_or_else(|| "build requires a spec")?;
let path = self
.directory
.as_ref()
.map(|directory| directory.join(ETH1_DEPOSIT_DATA_FILE))
.ok_or_else(|| "write_eth1_data_filer requires a directory")?;
let deposit_data = {
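// Editor's note: per the spec, BLS withdrawal credentials are the BLS_WITHDRAWAL_PREFIX byte
// followed by the last 31 bytes of the hash of the withdrawal public key.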
let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials(
&withdrawal_keypair.pk,
spec.bls_withdrawal_prefix_byte,
));
let mut deposit_data = DepositData {
pubkey: voting_keypair.pk.clone().into(),
withdrawal_credentials,
amount,
signature: Signature::empty_signature().into(),
};
deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec);
eth1_tx_data(&deposit_data)
.map_err(|e| format!("Unable to encode eth1 deposit tx data: {:?}", e))?
};
if path.exists() {
return Err(format!("Eth1 data file already exists at: {:?}", path));
}
File::create(&path)
.map_err(|e| format!("Unable to create file: {}", e))?
.write_all(&format!("0x{}", hex::encode(&deposit_data)).as_bytes())
.map_err(|e| format!("Unable to write eth1 data file: {}", e))?;
self.deposit_data = Some(deposit_data);
Ok(self)
}
pub fn build(self) -> Result<ValidatorDirectory, String> {
Ok(ValidatorDirectory {
directory: self.directory.ok_or_else(|| "build requires a directory")?,
voting_keypair: self.voting_keypair,
withdrawal_keypair: self.withdrawal_keypair,
deposit_data: self.deposit_data,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use tempdir::TempDir;
use types::{EthSpec, MinimalEthSpec};
type E = MinimalEthSpec;
#[test]
fn random_keypairs_round_trip() {
let spec = E::default_spec();
let temp_dir = TempDir::new("acc_manager").expect("should create test dir");
let created_dir = ValidatorDirectoryBuilder::default()
.spec(spec)
.full_deposit_amount()
.expect("should set full deposit amount")
.thread_random_keypairs()
.create_directory(temp_dir.path().into())
.expect("should create directory")
.write_keypair_files()
.expect("should write keypair files")
.write_eth1_data_file()
.expect("should write eth1 data file")
.build()
.expect("should build dir");
let loaded_dir = ValidatorDirectory::load_for_signing(created_dir.directory.clone())
.expect("should load directory");
assert_eq!(
created_dir, loaded_dir,
"the directory created should match the one loaded"
);
}
#[test]
fn deterministic_keypairs_round_trip() {
let spec = E::default_spec();
let temp_dir = TempDir::new("acc_manager").expect("should create test dir");
let index = 42;
let created_dir = ValidatorDirectoryBuilder::default()
.spec(spec)
.full_deposit_amount()
.expect("should set full deposit amount")
.insecure_keypairs(index)
.create_directory(temp_dir.path().into())
.expect("should create directory")
.write_keypair_files()
.expect("should write keypair files")
.write_eth1_data_file()
.expect("should write eth1 data file")
.build()
.expect("should build dir");
assert!(
created_dir.directory.exists(),
"should have created directory"
);
let mut parent = created_dir.directory.clone();
parent.pop();
assert_eq!(
parent,
PathBuf::from(temp_dir.path()),
"should have created directory ontop of base dir"
);
let expected_keypair = generate_deterministic_keypair(index);
assert_eq!(
created_dir.voting_keypair,
Some(expected_keypair.clone()),
"voting keypair should be as expected"
);
assert_eq!(
created_dir.withdrawal_keypair,
Some(expected_keypair),
"withdrawal keypair should be as expected"
);
assert!(
created_dir
.deposit_data
.clone()
.expect("should have data")
.len()
> 0,
"should have some deposit data"
);
let loaded_dir = ValidatorDirectory::load_for_signing(created_dir.directory.clone())
.expect("should load directory");
assert_eq!(
created_dir, loaded_dir,
"the directory created should match the one loaded"
);
}
}

View File

@ -0,0 +1,200 @@
use crate::fork_service::ForkService;
use crate::validator_directory::{ValidatorDirectory, ValidatorDirectoryBuilder};
use parking_lot::RwLock;
use rayon::prelude::*;
use slog::{error, Logger};
use slot_clock::SlotClock;
use std::collections::HashMap;
use std::fs::read_dir;
use std::iter::FromIterator;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::Arc;
use tempdir::TempDir;
use tree_hash::TreeHash;
use types::{
Attestation, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec, Fork, PublicKey, Signature,
};
#[derive(Clone)]
pub struct ValidatorStore<T, E: EthSpec> {
validators: Arc<RwLock<HashMap<PublicKey, ValidatorDirectory>>>,
spec: Arc<ChainSpec>,
log: Logger,
temp_dir: Option<Arc<TempDir>>,
fork_service: ForkService<T, E>,
_phantom: PhantomData<E>,
}
impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
pub fn load_from_disk(
base_dir: PathBuf,
spec: ChainSpec,
fork_service: ForkService<T, E>,
log: Logger,
) -> Result<Self, String> {
let validator_iter = read_dir(&base_dir)
.map_err(|e| format!("Failed to read base directory: {:?}", e))?
.filter_map(|validator_dir| {
let path = validator_dir.ok()?.path();
if path.is_dir() {
match ValidatorDirectory::load_for_signing(path.clone()) {
Ok(validator_directory) => Some(validator_directory),
Err(e) => {
error!(
log,
"Failed to load a validator directory";
"error" => e,
"path" => path.to_str(),
);
None
}
}
} else {
None
}
})
.filter_map(|validator_directory| {
validator_directory
.voting_keypair
.clone()
.map(|voting_keypair| (voting_keypair.pk, validator_directory))
});
Ok(Self {
validators: Arc::new(RwLock::new(HashMap::from_iter(validator_iter))),
spec: Arc::new(spec),
log,
temp_dir: None,
fork_service,
_phantom: PhantomData,
})
}
pub fn insecure_ephemeral_validators(
validator_indices: &[usize],
spec: ChainSpec,
fork_service: ForkService<T, E>,
log: Logger,
) -> Result<Self, String> {
let temp_dir = TempDir::new("insecure_validator")
.map_err(|e| format!("Unable to create temp dir: {:?}", e))?;
let data_dir = PathBuf::from(temp_dir.path());
let validators = validator_indices
.par_iter()
.map(|index| {
ValidatorDirectoryBuilder::default()
.spec(spec.clone())
.full_deposit_amount()?
.insecure_keypairs(*index)
.create_directory(data_dir.clone())?
.write_keypair_files()?
.write_eth1_data_file()?
.build()
})
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.filter_map(|validator_directory| {
validator_directory
.voting_keypair
.clone()
.map(|voting_keypair| (voting_keypair.pk, validator_directory))
});
Ok(Self {
validators: Arc::new(RwLock::new(HashMap::from_iter(validators))),
spec: Arc::new(spec),
log,
temp_dir: Some(Arc::new(temp_dir)),
fork_service,
_phantom: PhantomData,
})
}
pub fn voting_pubkeys(&self) -> Vec<PublicKey> {
self.validators
.read()
.iter()
.map(|(pubkey, _dir)| pubkey.clone())
.collect()
}
pub fn num_voting_validators(&self) -> usize {
self.validators.read().len()
}
fn fork(&self) -> Option<Fork> {
if self.fork_service.fork().is_none() {
error!(
self.log,
"Unable to get Fork for signing";
);
}
self.fork_service.fork()
}
pub fn randao_reveal(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option<Signature> {
// TODO: check this against the slot clock to make sure it's not an early reveal?
self.validators
.read()
.get(validator_pubkey)
.and_then(|validator_dir| {
let voting_keypair = validator_dir.voting_keypair.as_ref()?;
let message = epoch.tree_hash_root();
let domain = self.spec.get_domain(epoch, Domain::Randao, &self.fork()?);
Some(Signature::new(&message, domain, &voting_keypair.sk))
})
}
pub fn sign_block(
&self,
validator_pubkey: &PublicKey,
mut block: BeaconBlock<E>,
) -> Option<BeaconBlock<E>> {
// TODO: check for slashing.
self.validators
.read()
.get(validator_pubkey)
.and_then(|validator_dir| {
let voting_keypair = validator_dir.voting_keypair.as_ref()?;
block.sign(&voting_keypair.sk, &self.fork()?, &self.spec);
Some(block)
})
}
pub fn sign_attestation(
&self,
validator_pubkey: &PublicKey,
validator_committee_position: usize,
attestation: &mut Attestation<E>,
) -> Option<()> {
// TODO: check for slashing.
self.validators
.read()
.get(validator_pubkey)
.and_then(|validator_dir| {
let voting_keypair = validator_dir.voting_keypair.as_ref()?;
attestation
.sign(
&voting_keypair.sk,
validator_committee_position,
&self.fork()?,
&self.spec,
)
.map_err(|e| {
error!(
self.log,
"Error whilst signing attestation";
"error" => format!("{:?}", e)
)
})
.ok()?;
Some(())
})
}
}
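// Editor's sketch (not part of this diff): the randao reveal above is a BLS signature over the
// SSZ tree hash of the epoch under the `Randao` domain. A stand-alone version of the same
// computation, assuming a bare `types::Keypair` instead of a `ValidatorDirectory`:
#[allow(dead_code)]
fn randao_reveal_sketch(
keypair: &types::Keypair,
epoch: Epoch,
fork: &Fork,
spec: &ChainSpec,
) -> Signature {
// Mirrors `ValidatorStore::randao_reveal` above.
let message = epoch.tree_hash_root();
let domain = spec.get_domain(epoch, Domain::Randao, fork);
Signature::new(&message, domain, &keypair.sk)
}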