diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index f666d8403..000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,9 +0,0 @@
-[submodule "lighthouse-libs"]
-	path = lighthouse-libs
-	url = git@github.com:sigp/lighthouse-libs.git
-[submodule "lighthouse-beacon"]
-	path = lighthouse-beacon
-	url = git@github.com:sigp/lighthouse-beacon.git
-[submodule "lighthouse-validator"]
-	path = lighthouse-validator
-	url = https://github.com/sigp/lighthouse-validator
diff --git a/.travis.yml b/.travis.yml
index 67541b975..d43d21a00 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,4 @@
 language: rust
-git:
-  submodules: false
 before_install:
   - curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip
   - unzip protoc-3.4.0-linux-x86_64.zip -d protoc3
@@ -8,15 +6,10 @@ before_install:
   - sudo mv protoc3/include/* /usr/local/include/
   - sudo chown $USER /usr/local/bin/protoc
   - sudo chown -R $USER /usr/local/include/google
-  - sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules
-  - git submodule update --init --recursive
 script:
-  - cargo build --verbose --all --manifest-path lighthouse-beacon/Cargo.toml
-  - cargo build --verbose --all --manifest-path lighthouse-validator/Cargo.toml
-  - cargo build --verbose --all --manifest-path lighthouse-libs/Cargo.toml
-  - cargo test --verbose --all --manifest-path lighthouse-beacon/Cargo.toml
-  - cargo test --verbose --all --manifest-path lighthouse-validator/Cargo.toml
-  - cargo test --verbose --all --manifest-path lighthouse-libs/Cargo.toml
+  - cargo build --verbose --all
+  - cargo test --verbose --all
+  - cargo fmt --all -- --check
rust:
  - stable
  - beta
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..e5b34f083
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributors Guide
+
+Lighthouse is an open-source Ethereum 2.0 client. We are community driven and
+welcome all contributions. We aim to provide a constructive, respectful and fun
+environment for collaboration.
+
+We are active contributors to the [Ethereum 2.0 specification](https://github.com/ethereum/eth2.0-specs) and attend all [Eth
+2.0 implementers calls](https://github.com/ethereum/eth2.0-pm).
+
+This guide is geared towards beginners. If you're an open-source veteran, feel
+free to just skim this document and get straight into crushing issues.
+
+## Why Contribute
+
+There are many reasons you might contribute to Lighthouse. For example, you may
+wish to:
+
+- contribute to the Ethereum ecosystem.
+- establish yourself as a layer-1 Ethereum developer.
+- work in the amazing Rust programming language.
+- learn how to participate in open-source projects.
+- expand your software development skills.
+- flex your skills in a public forum to expand your career
+  opportunities (or simply for the fun of it).
+- grow your network by working with core Ethereum developers.
+
+## How to Contribute
+
+Regardless of the reason, the process to begin contributing is very much the
+same. We operate like a typical open-source project on GitHub: the
+repository [Issues](https://github.com/sigp/lighthouse/issues) is where we
+track what needs to be done and [Pull
+Requests](https://github.com/sigp/lighthouse/pulls) is where code gets
+reviewed. We use [gitter](https://gitter.im/sigp/lighthouse) to chat
+informally.
+
+### General Work-Flow
+
+We recommend the following work-flow for contributors:
+
+1. **Find an issue** to work on, either because it's interesting or suitable to
+   your skill-set.
+   Use comments to communicate your intentions and ask
+   questions.
+2. **Work in a feature branch** of your personal fork
+   (github.com/YOUR_NAME/lighthouse) of the main repository
+   (github.com/sigp/lighthouse).
+3. Once you feel you have addressed the issue, **create a pull-request** to merge
+   your changes into the main repository.
+4. Wait for the repository maintainers to **review your changes** to ensure the
+   issue is addressed satisfactorily. Optionally, mention your PR on
+   [gitter](https://gitter.im/sigp/lighthouse).
+5. If the issue is addressed, the repository maintainers will **merge your
+   pull-request** and you'll be an official contributor!
+
+Generally, you find an issue you'd like to work on and announce your intentions
+to start work in a comment on the issue. Then, do your work on a separate
+branch (a "feature branch") in your own fork of the main repository. Once
+you're happy and you think the issue has been addressed, create a pull request
+into the main repository.
+
+### First-time Set-up
+
+First-time contributors can get their git environment up and running with these
+steps:
+
+1. [Create a
+   fork](https://help.github.com/articles/fork-a-repo/#fork-an-example-repository)
+and [clone
+it](https://help.github.com/articles/fork-a-repo/#step-2-create-a-local-clone-of-your-fork)
+to your local machine.
+2. [Add an _"upstream"_
+   remote](https://help.github.com/articles/fork-a-repo/#step-3-configure-git-to-sync-your-fork-with-the-original-spoon-knife-repository)
+that tracks github.com/sigp/lighthouse using `$ git remote add upstream
+https://github.com/sigp/lighthouse.git` (pro-tip: [use SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) instead of HTTPS).
+3. Create a new feature branch with `$ git checkout -b your_feature_name`. The
+   name of your branch isn't critical, but it should be short and instructive.
+E.g., if you're fixing a bug with serialization, you could name your branch
+`fix_serialization_bug`.
+4. Commit your changes and push them to your fork with `$ git push origin
+   your_feature_name`.
+5. Go to your fork on github.com and use the web interface to create a pull
+   request into the sigp/lighthouse repo.
+
+From there, the repository maintainers will review the PR and either accept it
+or provide some constructive feedback.
+
+There's a great
+[guide](https://akrabat.com/the-beginners-guide-to-contributing-to-a-github-project/)
+by Rob Allen that provides much more detail on each of these steps, if you're
+having trouble. As always, jump on [gitter](https://gitter.im/sigp/lighthouse)
+if you get stuck.
+
+
+## FAQs
+
+### I don't think I have anything to add
+
+There's lots to be done, and all sorts of tasks. You can do anything
+from correcting typos through to writing core consensus code. If you reach out,
+we'll include you.
+
+### I'm not sure my Rust is good enough
+
+We're open to developers of all levels. If you create a PR and your code
+doesn't meet our standards, we'll help you fix it and we'll share the reasoning
+with you. Contributing to open-source is a great way to learn.
+
+### I'm not sure I know enough about Ethereum 2.0
+
+No problem, there are plenty of tasks that don't require extensive Ethereum
+knowledge. You can learn about Ethereum as you go.
+
+### I'm afraid of making a mistake and looking silly
+
+Don't be. We're all about personal development and constructive feedback. If you
+make a mistake and learn from it, everyone wins.
+ +### I don't like the way you do things + +Please, make an issue and explain why. We're open to constructive criticism and +will happily change our ways. diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..a2c464366 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,21 @@ +[workspace] +members = [ + "eth2/attester", + "eth2/block_producer", + "eth2/fork_choice", + "eth2/state_processing", + "eth2/types", + "eth2/utils/bls", + "eth2/utils/boolean-bitfield", + "eth2/utils/hashing", + "eth2/utils/honey-badger-split", + "eth2/utils/slot_clock", + "eth2/utils/ssz", + "eth2/utils/vec_shuffle", + "beacon_node", + "beacon_node/db", + "beacon_node/beacon_chain", + "beacon_node/beacon_chain/test_harness", + "protos", + "validator_client", +] diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..063ece3cd --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM rust:latest + +RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool + +RUN git clone https://github.com/google/protobuf.git && \ + cd protobuf && \ + ./autogen.sh && \ + ./configure && \ + make && \ + make install && \ + ldconfig && \ + make clean && \ + cd .. && \ + rm -r protobuf + + +RUN mkdir /cargocache && chmod -R ugo+rwX /cargocache diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..42755d5f7 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,20 @@ +pipeline { + agent { + dockerfile { + filename 'Dockerfile' + args '-v cargo-cache:/cargocache:rw -e "CARGO_HOME=/cargocache"' + } + } + stages { + stage('Build') { + steps { + sh 'cargo build' + } + } + stage('Test') { + steps { + sh 'cargo test --all' + } + } + } +} diff --git a/README.md b/README.md index 7f82493fc..7759c1166 100644 --- a/README.md +++ b/README.md @@ -7,49 +7,7 @@ Chain, maintained by Sigma Prime. The "Serenity" project is also known as "Ethereum 2.0" or "Shasper". -## Project Structure - -The Lighthouse project is managed across four Github repositories: - -- [sigp/lighthouse](https://github.com/sigp/lighthouse) (this repo): The - "integration" repository which provides: - - Project-wide documentation - - A landing-page for users and contributors. - - In the future, various other integration tests and orchestration suites. -- [sigp/lighthouse-libs](https://github.com/sigp/lighthouse-libs): Contains - Rust crates common to the entire Lighthouse project, including: - - Pure specification logic (e.g., state transitions, etc) - - SSZ (SimpleSerialize) - - BLS Signature libraries, and more. -- [sigp/lighthouse-beacon](https://github.com/sigp/lighthouse-beacon): The - beacon node binary, responsible for connection to peers across the - network and maintaining a view of the Beacon Chain. -- [sigp/lighthouse-validator](https://github.com/sigp/lighthouse-validator): - The validator client binary, which connects to a beacon node and fulfils - the duties of a staked validator (producing and attesting to blocks). - -## Contributing - -We welcome new contributors and greatly appreciate the efforts from existing -contributors. - -If you'd like to contribute to development on Lighthouse, we recommend checking -for [issues on the lighthouse-libs -repo](https://github.com/sigp/lighthouse-libs/issues) first, then checking the -other repositories. - -If you don't find anything there, please reach out on the -[gitter](https://gitter.im/sigp/lighthouse) channel. - -Additional resources: - -- [ONBOARDING.md](docs/ONBOARDING.md): General on-boarding info, - including style-guide. 
-- [LIGHTHOUSE.md](docs/LIGHTHOUSE.md): Project goals and ethos.
-- [RUNNING.md](docs/RUNNING.md): Step-by-step on getting the code running.
-- [SERENITY.md](docs/SERENITY.md): Introduction to Ethereum Serenity.
-
-## Project Summary
+## Lighthouse Client
 
 Lighthouse is an open-source Ethereum Serenity client that is currently under
 development. Designed as a Serenity-only client, Lighthouse will not
@@ -61,6 +19,15 @@ to existing clients, such as
 [Parity-Ethereum](https://github.com/paritytech/parity-ethereum), via RPC
 to enable present-Ethereum functionality.
 
+### Further Reading
+
+- [About Lighthouse](docs/lighthouse.md): Goals, ideology and ethos surrounding
+this implementation.
+- [What is Ethereum Serenity](docs/serenity.md): An introduction to Ethereum Serenity.
+
+If you'd like some background on Sigma Prime, please see the [Lighthouse Update
+\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
+[company website](https://sigmaprime.io).
 
 ### Components
 
@@ -94,7 +61,7 @@ by the team:
 from the Ethereum Foundation to develop *simpleserialize* (SSZ), a
 purpose-built serialization format for sending information across a network.
 Check out the [SSZ
-implementation](https://github.com/sigp/lighthouse-libs/tree/master/ssz)
+implementation](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz)
 and this
 [research](https://github.com/sigp/serialization_sandbox/blob/report/report/serialization_report.md)
 on serialization formats for more information.
@@ -112,23 +79,89 @@ In addition to these components
 we are also working on database schemas, RPC
 frameworks, specification development, database optimizations (e.g.,
 bloom-filters), and tons of other interesting stuff (at least we think so).
 
+### Directory Structure
+
+Here we provide an overview of the directory structure:
+
+- `beacon_chain/`: contains logic derived directly from the specification.
+  E.g., shuffling algorithms, state transition logic and structs, block
+  validation, BLS crypto, etc.
+- `lighthouse/`: contains logic specific to this client implementation. E.g.,
+  CLI parsing, RPC end-points, databases, etc.
+
+### Running
+
+**NOTE: The cryptography libraries used in this implementation are
+experimental. As such all cryptography is assumed to be insecure.**
+
+This code-base is still very much under development and does not provide any
+user-facing functionality. For developers and researchers, there are several
+tests and benchmarks which may be of interest.
+
+A few basic steps are needed to get set up:
+
+ 1. Install [rustup](https://rustup.rs/), the Rust toolchain manager
+    (Linux | macOS | Windows). To install it, run the following command in
+    your terminal: `$ curl https://sh.rustup.rs -sSf | sh`
+ 2. (Linux & macOS) To configure your current shell, run:
+    `$ source $HOME/.cargo/env`
+ 3. Use the command `rustup show` to get information about the Rust
+    installation. You should see that the active toolchain is the stable
+    version.
+ 4. Run `rustc --version` to check the installation and version of Rust.
+    - Updates can be performed using `rustup update`.
+ 5. Install build dependencies (Arch packages are listed here; your
+    distribution's will likely be similar):
+    - `clang`: required by RocksDB.
+    - `protobuf`: required for protobuf serialization (gRPC).
+ 6. Navigate to the working directory.
+ 7. Run the test suite with `cargo test --all`. This builds the project and
+    runs all of the required test cases. If it's your first run, grab a
+    coffee in the meantime;
+    building, compiling and running every test case takes a while. If there
+    is no error, everything is working properly and it's time to get your
+    hands dirty. If there is an error, please raise an
+    [issue](https://github.com/sigp/lighthouse/issues) and we will help you.
+ 8. As an alternative to the above step, you may run benchmarks with
+    `cargo bench --all`.
+
+##### Note:
+Lighthouse presently runs on Rust `stable`; however, benchmarks currently
+require the `nightly` version.
+
+##### Note for Windows users:
+Perl may also be required to build Lighthouse. You can install
+[Strawberry Perl](http://strawberryperl.com/), or alternatively use a choco
+install command: `choco install strawberryperl`.
+
+Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues
+compiling in Windows. You can specify a known working version by editing the
+version in protos/Cargo.toml's "build-dependencies" section to
+`protoc-grpcio = "<=0.3.0"`.
+
+### Contributing
+
+**Lighthouse welcomes contributors with open arms.**
+
+If you would like to learn more about Ethereum Serenity and/or
+[Rust](https://www.rust-lang.org/), we are more than happy to on-board you
+and assign you some tasks. We aim to be as accepting and understanding as
+possible; we are happy to up-skill contributors in exchange for their
+assistance with the project.
+
+Alternatively, if you are an ETH/Rust veteran, we'd love your input. We're
+always looking for the best way to implement things and welcome all
+respectful criticisms.
+
+If you are looking to contribute, please head to our
+[onboarding documentation](https://github.com/sigp/lighthouse/blob/master/docs/onboarding.md).
+
+You can also have a look through the [open
+issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good
+first
+issue](https://github.com/sigp/lighthouse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
+tag) and ping us on the [gitter](https://gitter.im/sigp/lighthouse) channel. We need
+your support!
 
 ## Contact
 
 The best place for discussion is the [sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse).
 Ping @paulhauner or @AgeManning to get the quickest response.
 
-If you'd like some background on Sigma Prime, please see the [Lighthouse Update
-\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
-[company website](https://sigmaprime.io).
-
 # Donations
 
-We accept donations at the following Ethereum address. All donations go towards
-funding development of Ethereum 2.0.
+If you support the cause, we could certainly use donations to help fund development:
 
-[`0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b`](https://etherscan.io/address/0x25c4a76e7d118705e7ea2e9b7d8c59930d8acd3b)
-
-Alternatively, you can contribute via [Gitcoin Grant](https://gitcoin.co/grants/25/lighthouse-ethereum-20-client).
-
-We appreciate all contributions to the project.
+`0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b`
diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml
new file mode 100644
index 000000000..a4804e07e
--- /dev/null
+++ b/beacon_node/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "beacon_node"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+bls = { path = "../eth2/utils/bls" }
+beacon_chain = { path = "beacon_chain" }
+grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
+protobuf = "2.0.2"
+protos = { path = "../protos" }
+clap = "2.32.0"
+db = { path = "db" }
+dirs = "1.0.3"
+futures = "0.1.23"
+fork_choice = { path = "../eth2/fork_choice" }
+slog = "^2.2.3"
+slot_clock = { path = "../eth2/utils/slot_clock" }
+slog-term = "^2.4.0"
+slog-async = "^2.3.0"
+types = { path = "../eth2/types" }
+ssz = { path = "../eth2/utils/ssz" }
+tokio = "0.1"
diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml
new file mode 100644
index 000000000..36d7b3721
--- /dev/null
+++ b/beacon_node/beacon_chain/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "beacon_chain"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+block_producer = { path = "../../eth2/block_producer" }
+bls = { path = "../../eth2/utils/bls" }
+boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
+db = { path = "../db" }
+failure = "0.1"
+failure_derive = "0.1"
+hashing = { path = "../../eth2/utils/hashing" }
+fork_choice = { path = "../../eth2/fork_choice" }
+parking_lot = "0.7"
+log = "0.4"
+env_logger = "0.6"
+serde = "1.0"
+serde_derive = "1.0"
+serde_json = "1.0"
+slot_clock = { path = "../../eth2/utils/slot_clock" }
+ssz = { path = "../../eth2/utils/ssz" }
+state_processing = { path = "../../eth2/state_processing" }
+types = { path = "../../eth2/types" }
diff --git a/beacon_node/beacon_chain/src/attestation_aggregator.rs b/beacon_node/beacon_chain/src/attestation_aggregator.rs
new file mode 100644
index 000000000..149f0d60d
--- /dev/null
+++ b/beacon_node/beacon_chain/src/attestation_aggregator.rs
@@ -0,0 +1,217 @@
+use state_processing::validate_attestation_without_signature;
+use std::collections::{HashMap, HashSet};
+use types::{
+    beacon_state::CommitteesError, AggregateSignature, Attestation, AttestationData, BeaconState,
+    Bitfield, ChainSpec, FreeAttestation, Signature,
+};
+
+const PHASE_0_CUSTODY_BIT: bool = false;
+
+/// Provides the functionality to:
+///
+/// - Receive a `FreeAttestation` and aggregate it into an `Attestation` (or create a new one if
+/// one doesn't already exist).
+/// - Store all aggregated or created `Attestation`s.
+/// - Produce a list of attestations that would be valid for inclusion in some `BeaconState` (and
+/// therefore valid for inclusion in a `BeaconBlock`).
+///
+/// Note: `Attestations` are stored in memory and never deleted. This is not scalable and must be
+/// rectified in a future revision.
+#[derive(Default)]
+pub struct AttestationAggregator {
+    store: HashMap<Vec<u8>, Attestation>,
+}
+
+pub struct Outcome {
+    pub valid: bool,
+    pub message: Message,
+}
+
+pub enum Message {
+    /// The free attestation was added to an existing attestation.
+    Aggregated,
+    /// The free attestation has already been aggregated into an existing attestation.
+    AggregationNotRequired,
+    /// The free attestation was transformed into a new attestation.
+    NewAttestationCreated,
+    /// The supplied `validator_index` is not in the committee for the given `shard` and `slot`.
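+    /// This is returned both when the committee lookup for the index fails and when the index is
+    /// not present in `state.validator_registry` (see `process_free_attestation` below).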
+    BadValidatorIndex,
+    /// The given `signature` did not match the `pubkey` in the given
+    /// `state.validator_registry`.
+    BadSignature,
+    /// The given `slot` does not match the validator's committee assignment.
+    BadSlot,
+    /// The given `shard` does not match the validator's committee assignment.
+    BadShard,
+}
+
+macro_rules! some_or_invalid {
+    ($expression: expr, $error: expr) => {
+        match $expression {
+            Some(x) => x,
+            None => {
+                return Ok(Outcome {
+                    valid: false,
+                    message: $error,
+                });
+            }
+        }
+    };
+}
+
+impl AttestationAggregator {
+    /// Instantiates a new AttestationAggregator with an empty database.
+    pub fn new() -> Self {
+        Self {
+            store: HashMap::new(),
+        }
+    }
+
+    /// Accepts some `FreeAttestation`, validates it and either aggregates it upon some existing
+    /// `Attestation` or produces a new `Attestation`.
+    ///
+    /// The "validation" provided is not complete; instead, the following points are checked:
+    /// - The given `validator_index` is in the committee for the given `shard` for the given
+    /// `slot`.
+    /// - The signature is verified against that of the validator at `validator_index`.
+    pub fn process_free_attestation(
+        &mut self,
+        state: &BeaconState,
+        free_attestation: &FreeAttestation,
+        spec: &ChainSpec,
+    ) -> Result<Outcome, CommitteesError> {
+        let (slot, shard, committee_index) = some_or_invalid!(
+            state.attestation_slot_and_shard_for_validator(
+                free_attestation.validator_index as usize,
+                spec,
+            )?,
+            Message::BadValidatorIndex
+        );
+
+        if free_attestation.data.slot != slot {
+            return Ok(Outcome {
+                valid: false,
+                message: Message::BadSlot,
+            });
+        }
+        if free_attestation.data.shard != shard {
+            return Ok(Outcome {
+                valid: false,
+                message: Message::BadShard,
+            });
+        }
+
+        let signable_message = free_attestation.data.signable_message(PHASE_0_CUSTODY_BIT);
+
+        let validator_record = some_or_invalid!(
+            state
+                .validator_registry
+                .get(free_attestation.validator_index as usize),
+            Message::BadValidatorIndex
+        );
+
+        if !free_attestation
+            .signature
+            .verify(&signable_message, &validator_record.pubkey)
+        {
+            return Ok(Outcome {
+                valid: false,
+                message: Message::BadSignature,
+            });
+        }
+
+        if let Some(existing_attestation) = self.store.get(&signable_message) {
+            if let Some(updated_attestation) = aggregate_attestation(
+                existing_attestation,
+                &free_attestation.signature,
+                committee_index as usize,
+            ) {
+                self.store.insert(signable_message, updated_attestation);
+                Ok(Outcome {
+                    valid: true,
+                    message: Message::Aggregated,
+                })
+            } else {
+                Ok(Outcome {
+                    valid: true,
+                    message: Message::AggregationNotRequired,
+                })
+            }
+        } else {
+            let mut aggregate_signature = AggregateSignature::new();
+            aggregate_signature.add(&free_attestation.signature);
+            let mut aggregation_bitfield = Bitfield::new();
+            aggregation_bitfield.set(committee_index as usize, true);
+            let new_attestation = Attestation {
+                data: free_attestation.data.clone(),
+                aggregation_bitfield,
+                custody_bitfield: Bitfield::new(),
+                aggregate_signature,
+            };
+            self.store.insert(signable_message, new_attestation);
+            Ok(Outcome {
+                valid: true,
+                message: Message::NewAttestationCreated,
+            })
+        }
+    }
+
+    /// Returns all known attestations which are:
+    ///
+    /// - Valid for the given state
+    /// - Not already in `state.latest_attestations`.
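+    ///
+    /// A sketch of the intended flow (`state`, `free_attestation` and `spec` are assumed to be
+    /// in scope; the example is marked `ignore` as it is illustrative only):
+    ///
+    /// ```ignore
+    /// let mut aggregator = AttestationAggregator::new();
+    /// // Validate and aggregate a single free attestation.
+    /// aggregator.process_free_attestation(&state, &free_attestation, &spec)?;
+    /// // Later, collect everything that would be valid for inclusion in a block.
+    /// let valid_attestations = aggregator.get_attestations_for_state(&state, &spec);
+    /// ```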
+    pub fn get_attestations_for_state(
+        &self,
+        state: &BeaconState,
+        spec: &ChainSpec,
+    ) -> Vec<Attestation> {
+        let mut known_attestation_data: HashSet<AttestationData> = HashSet::new();
+
+        state.latest_attestations.iter().for_each(|attestation| {
+            known_attestation_data.insert(attestation.data.clone());
+        });
+
+        self.store
+            .values()
+            .filter_map(|attestation| {
+                if validate_attestation_without_signature(&state, attestation, spec).is_ok()
+                    && !known_attestation_data.contains(&attestation.data)
+                {
+                    Some(attestation.clone())
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+}
+
+/// Produces a new `Attestation` where:
+///
+/// - `signature` is added to `Attestation.aggregate_signature`
+/// - `Attestation.aggregation_bitfield[committee_index]` is set to `true`.
+fn aggregate_attestation(
+    existing_attestation: &Attestation,
+    signature: &Signature,
+    committee_index: usize,
+) -> Option<Attestation> {
+    let already_signed = existing_attestation
+        .aggregation_bitfield
+        .get(committee_index)
+        .unwrap_or(false);
+
+    if already_signed {
+        None
+    } else {
+        let mut aggregation_bitfield = existing_attestation.aggregation_bitfield.clone();
+        aggregation_bitfield.set(committee_index, true);
+        let mut aggregate_signature = existing_attestation.aggregate_signature.clone();
+        aggregate_signature.add(&signature);
+
+        Some(Attestation {
+            aggregation_bitfield,
+            aggregate_signature,
+            ..existing_attestation.clone()
+        })
+    }
+}
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
new file mode 100644
index 000000000..41ceb4e29
--- /dev/null
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -0,0 +1,595 @@
+use crate::attestation_aggregator::{AttestationAggregator, Outcome as AggregationOutcome};
+use crate::checkpoint::CheckPoint;
+use db::{
+    stores::{BeaconBlockStore, BeaconStateStore},
+    ClientDB, DBError,
+};
+use fork_choice::{ForkChoice, ForkChoiceError};
+use log::{debug, trace};
+use parking_lot::{RwLock, RwLockReadGuard};
+use slot_clock::SlotClock;
+use ssz::ssz_encode;
+use state_processing::{
+    BlockProcessable, BlockProcessingError, SlotProcessable, SlotProcessingError,
+};
+use std::sync::Arc;
+use types::{
+    beacon_state::CommitteesError,
+    readers::{BeaconBlockReader, BeaconStateReader},
+    AttestationData, BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Crosslink, Deposit,
+    Epoch, Eth1Data, FreeAttestation, Hash256, PublicKey, Signature, Slot,
+};
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    InsufficientValidators,
+    BadRecentBlockRoots,
+    CommitteesError(CommitteesError),
+    DBInconsistent(String),
+    DBError(String),
+    ForkChoiceError(ForkChoiceError),
+    MissingBeaconBlock(Hash256),
+    MissingBeaconState(Hash256),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum ValidBlock {
+    /// The block was successfully processed.
+    Processed,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum InvalidBlock {
+    /// The block slot is greater than the present slot.
+    FutureSlot,
+    /// The block state_root does not match the generated state.
+    StateRootMismatch,
+    /// The block's parent_root is unknown.
+    ParentUnknown,
+    /// There was an error whilst advancing the parent state to the present slot. This condition
+    /// should not occur; it likely represents an internal error.
+    SlotProcessingError(SlotProcessingError),
+    /// The block could not be applied to the state; it is invalid.
+    PerBlockProcessingError(BlockProcessingError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BlockProcessingOutcome {
+    /// The block was successfully validated.
+    ValidBlock(ValidBlock),
+    /// The block was not successfully validated.
+    InvalidBlock(InvalidBlock),
+}
+
+pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
+    pub block_store: Arc<BeaconBlockStore<T>>,
+    pub state_store: Arc<BeaconStateStore<T>>,
+    pub slot_clock: U,
+    pub attestation_aggregator: RwLock<AttestationAggregator>,
+    canonical_head: RwLock<CheckPoint>,
+    finalized_head: RwLock<CheckPoint>,
+    pub state: RwLock<BeaconState>,
+    pub spec: ChainSpec,
+    pub fork_choice: RwLock<F>,
+}
+
+impl<T, U, F> BeaconChain<T, U, F>
+where
+    T: ClientDB,
+    U: SlotClock,
+    F: ForkChoice,
+{
+    /// Instantiate a new Beacon Chain, from genesis.
+    pub fn genesis(
+        state_store: Arc<BeaconStateStore<T>>,
+        block_store: Arc<BeaconBlockStore<T>>,
+        slot_clock: U,
+        genesis_time: u64,
+        latest_eth1_data: Eth1Data,
+        initial_validator_deposits: Vec<Deposit>,
+        spec: ChainSpec,
+        fork_choice: F,
+    ) -> Result<Self, Error> {
+        if initial_validator_deposits.is_empty() {
+            return Err(Error::InsufficientValidators);
+        }
+
+        let genesis_state = BeaconState::genesis(
+            genesis_time,
+            initial_validator_deposits,
+            latest_eth1_data,
+            &spec,
+        );
+        let state_root = genesis_state.canonical_root();
+        state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?;
+
+        let genesis_block = BeaconBlock::genesis(state_root, &spec);
+        let block_root = genesis_block.canonical_root();
+        block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;
+
+        let finalized_head = RwLock::new(CheckPoint::new(
+            genesis_block.clone(),
+            block_root,
+            genesis_state.clone(),
+            state_root,
+        ));
+        let canonical_head = RwLock::new(CheckPoint::new(
+            genesis_block.clone(),
+            block_root,
+            genesis_state.clone(),
+            state_root,
+        ));
+        let attestation_aggregator = RwLock::new(AttestationAggregator::new());
+
+        Ok(Self {
+            block_store,
+            state_store,
+            slot_clock,
+            attestation_aggregator,
+            state: RwLock::new(genesis_state.clone()),
+            finalized_head,
+            canonical_head,
+            spec,
+            fork_choice: RwLock::new(fork_choice),
+        })
+    }
+
+    /// Update the canonical head to some new values.
+    pub fn update_canonical_head(
+        &self,
+        new_beacon_block: BeaconBlock,
+        new_beacon_block_root: Hash256,
+        new_beacon_state: BeaconState,
+        new_beacon_state_root: Hash256,
+    ) {
+        let mut head = self.canonical_head.write();
+        head.update(
+            new_beacon_block,
+            new_beacon_block_root,
+            new_beacon_state,
+            new_beacon_state_root,
+        );
+    }
+
+    /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the
+    /// fork-choice rule).
+    ///
+    /// It is important to note that the `beacon_state` returned may not match the present slot. It
+    /// is the state as it was when the head block was received, which could be some slots prior to
+    /// now.
+    pub fn head(&self) -> RwLockReadGuard<CheckPoint> {
+        self.canonical_head.read()
+    }
+
+    /// Update the justified head to some new values.
+    pub fn update_finalized_head(
+        &self,
+        new_beacon_block: BeaconBlock,
+        new_beacon_block_root: Hash256,
+        new_beacon_state: BeaconState,
+        new_beacon_state_root: Hash256,
+    ) {
+        let mut finalized_head = self.finalized_head.write();
+        finalized_head.update(
+            new_beacon_block,
+            new_beacon_block_root,
+            new_beacon_state,
+            new_beacon_state_root,
+        );
+    }
+
+    /// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen,
+    /// indirectly, by the fork-choice rule).
+    pub fn finalized_head(&self) -> RwLockReadGuard<CheckPoint> {
+        self.finalized_head.read()
+    }
+
+    /// Advance the `self.state` `BeaconState` to the supplied slot.
+    ///
+    /// This will perform per_slot and per_epoch processing as required.
+    ///
+    /// The `previous_block_root` will be set to the root of the current head block (as determined
+    /// by the fork-choice rule).
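+    ///
+    /// Note that each intermediate slot is processed individually, so advancing the state across
+    /// many slots at once may be expensive.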
+    ///
+    /// It is important to note that this is _not_ the state corresponding to the canonical head
+    /// block; rather, it is the state which may or may not have had additional per-slot/epoch
+    /// processing applied to it.
+    pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> {
+        let state_slot = self.state.read().slot;
+        let head_block_root = self.head().beacon_block_root;
+        for _ in state_slot.as_u64()..slot.as_u64() {
+            self.state
+                .write()
+                .per_slot_processing(head_block_root, &self.spec)?;
+        }
+        Ok(())
+    }
+
+    /// Returns the validator index (if any) for the given public key.
+    ///
+    /// Information is retrieved from the present `beacon_state.validator_registry`.
+    pub fn validator_index(&self, pubkey: &PublicKey) -> Option<usize> {
+        for (i, validator) in self
+            .head()
+            .beacon_state
+            .validator_registry
+            .iter()
+            .enumerate()
+        {
+            if validator.pubkey == *pubkey {
+                return Some(i);
+            }
+        }
+        None
+    }
+
+    /// Reads the slot clock, returns `None` if the slot is unavailable.
+    ///
+    /// The slot might be unavailable due to an error with the system clock, or if the present time
+    /// is before genesis (i.e., a negative slot).
+    ///
+    /// This is distinct to `present_slot`, which simply reads the latest state. If a
+    /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`,
+    /// `self.state` should undergo per slot processing.
+    pub fn read_slot_clock(&self) -> Option<Slot> {
+        match self.slot_clock.present_slot() {
+            Ok(Some(some_slot)) => Some(some_slot),
+            Ok(None) => None,
+            _ => None,
+        }
+    }
+
+    /// Returns the slot of the present state.
+    ///
+    /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If
+    /// `self.state` has not been transitioned it is possible for the system clock to be on a
+    /// different slot to what is returned from this call.
+    pub fn present_slot(&self) -> Slot {
+        self.state.read().slot
+    }
+
+    /// Returns the block proposer for a given slot.
+    ///
+    /// Information is read from the present `beacon_state` shuffling, so only information from the
+    /// present and prior epoch is available.
+    pub fn block_proposer(&self, slot: Slot) -> Result<usize, CommitteesError> {
+        let index = self
+            .state
+            .read()
+            .get_beacon_proposer_index(slot, &self.spec)?;
+
+        Ok(index)
+    }
+
+    /// Returns the justified epoch for the present state.
+    pub fn justified_epoch(&self) -> Epoch {
+        self.state.read().justified_epoch
+    }
+
+    /// Returns the attestation slot and shard for a given validator index.
+    ///
+    /// Information is read from the current state, so only information from the present and prior
+    /// epoch is available.
+    pub fn validator_attestion_slot_and_shard(
+        &self,
+        validator_index: usize,
+    ) -> Result<Option<(Slot, u64)>, CommitteesError> {
+        if let Some((slot, shard, _committee)) = self
+            .state
+            .read()
+            .attestation_slot_and_shard_for_validator(validator_index, &self.spec)?
+        {
+            Ok(Some((slot, shard)))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
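+    ///
+    /// Note that `shard_block_root` and the `latest_crosslink` root are zeroed, as no shard
+    /// chain data is available in phase 0.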
+    pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
+        let justified_epoch = self.justified_epoch();
+        let justified_block_root = *self
+            .state
+            .read()
+            .get_block_root(
+                justified_epoch.start_slot(self.spec.epoch_length),
+                &self.spec,
+            )
+            .ok_or_else(|| Error::BadRecentBlockRoots)?;
+
+        let epoch_boundary_root = *self
+            .state
+            .read()
+            .get_block_root(
+                self.state.read().current_epoch_start_slot(&self.spec),
+                &self.spec,
+            )
+            .ok_or_else(|| Error::BadRecentBlockRoots)?;
+
+        Ok(AttestationData {
+            slot: self.state.read().slot,
+            shard,
+            beacon_block_root: self.head().beacon_block_root,
+            epoch_boundary_root,
+            shard_block_root: Hash256::zero(),
+            latest_crosslink: Crosslink {
+                epoch: self.state.read().slot.epoch(self.spec.epoch_length),
+                shard_block_root: Hash256::zero(),
+            },
+            justified_epoch,
+            justified_block_root,
+        })
+    }
+
+    /// Validate a `FreeAttestation` and either:
+    ///
+    /// - Create a new `Attestation`.
+    /// - Aggregate it into an existing `Attestation`.
+    pub fn process_free_attestation(
+        &self,
+        free_attestation: FreeAttestation,
+    ) -> Result<AggregationOutcome, Error> {
+        let aggregation_outcome = self
+            .attestation_aggregator
+            .write()
+            .process_free_attestation(&self.state.read(), &free_attestation, &self.spec)?;
+        // TODO: Check this comment
+        //.map_err(|e| e.into())?;
+
+        // Return if the attestation is invalid.
+        if !aggregation_outcome.valid {
+            return Ok(aggregation_outcome);
+        }
+
+        // The attestation is valid; proceed with the fork-choice logic.
+        self.fork_choice.write().add_attestation(
+            free_attestation.validator_index,
+            &free_attestation.data.beacon_block_root,
+        )?;
+        Ok(aggregation_outcome)
+    }
+
+    /// Dumps the entire canonical chain, from the head to genesis, to a vector for analysis.
+    ///
+    /// This could be a very expensive operation and should only be done in testing/analysis
+    /// activities.
+    pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, Error> {
+        let mut dump = vec![];
+
+        let mut last_slot = CheckPoint {
+            beacon_block: self.head().beacon_block.clone(),
+            beacon_block_root: self.head().beacon_block_root,
+            beacon_state: self.head().beacon_state.clone(),
+            beacon_state_root: self.head().beacon_state_root,
+        };
+
+        dump.push(last_slot.clone());
+
+        loop {
+            let beacon_block_root = last_slot.beacon_block.parent_root;
+
+            if beacon_block_root == self.spec.zero_hash {
+                break; // Genesis has been reached.
+            }
+
+            let beacon_block = self
+                .block_store
+                .get_deserialized(&beacon_block_root)?
+                .ok_or_else(|| {
+                    Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
+                })?;
+            let beacon_state_root = beacon_block.state_root;
+            let beacon_state = self
+                .state_store
+                .get_deserialized(&beacon_state_root)?
+                .ok_or_else(|| {
+                    Error::DBInconsistent(format!("Missing state {}", beacon_state_root))
+                })?;
+
+            let slot = CheckPoint {
+                beacon_block,
+                beacon_block_root,
+                beacon_state,
+                beacon_state_root,
+            };
+
+            dump.push(slot.clone());
+            last_slot = slot;
+        }
+
+        Ok(dump)
+    }
+
+    /// Accept some block and attempt to add it to the block DAG.
+    ///
+    /// Will accept blocks from prior slots; however, it will reject any block from a future slot.
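+    ///
+    /// Processing roughly follows: load the parent block and its state from the database, advance
+    /// the parent state to the block's slot, apply the block, then check the resulting state root
+    /// against `block.state_root`.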
+    pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> {
+        debug!("Processing block with slot {}...", block.slot());
+
+        let block_root = block.canonical_root();
+
+        let present_slot = self.present_slot();
+
+        if block.slot > present_slot {
+            return Ok(BlockProcessingOutcome::InvalidBlock(
+                InvalidBlock::FutureSlot,
+            ));
+        }
+
+        // Load the block's parent block from the database, returning invalid if that block is not
+        // found.
+        let parent_block_root = block.parent_root;
+        let parent_block = match self.block_store.get_reader(&parent_block_root)? {
+            Some(parent_block) => parent_block,
+            None => {
+                return Ok(BlockProcessingOutcome::InvalidBlock(
+                    InvalidBlock::ParentUnknown,
+                ));
+            }
+        };
+
+        // Load the parent block's state from the database, returning an error if it is not found.
+        // It is an error because if we know the parent block we should also know the parent state.
+        let parent_state_root = parent_block.state_root();
+        let parent_state = self
+            .state_store
+            .get_reader(&parent_state_root)?
+            .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?
+            .into_beacon_state()
+            .ok_or_else(|| {
+                Error::DBInconsistent(format!("State SSZ invalid {}", parent_state_root))
+            })?;
+
+        // TODO: check the block proposer signature BEFORE doing a state transition. This will
+        // significantly lower exposure surface to DoS attacks.
+
+        // Transition the parent state to the present slot.
+        let mut state = parent_state;
+        for _ in state.slot.as_u64()..present_slot.as_u64() {
+            if let Err(e) = state.per_slot_processing(parent_block_root, &self.spec) {
+                return Ok(BlockProcessingOutcome::InvalidBlock(
+                    InvalidBlock::SlotProcessingError(e),
+                ));
+            }
+        }
+
+        // Apply the received block to its parent state (which has been transitioned into this
+        // slot).
+        if let Err(e) = state.per_block_processing(&block, &self.spec) {
+            return Ok(BlockProcessingOutcome::InvalidBlock(
+                InvalidBlock::PerBlockProcessingError(e),
+            ));
+        }
+
+        let state_root = state.canonical_root();
+
+        if block.state_root != state_root {
+            return Ok(BlockProcessingOutcome::InvalidBlock(
+                InvalidBlock::StateRootMismatch,
+            ));
+        }
+
+        // Store the block and state.
+        self.block_store.put(&block_root, &ssz_encode(&block)[..])?;
+        self.state_store.put(&state_root, &ssz_encode(&state)[..])?;
+
+        // Run the fork_choice add_block logic.
+        self.fork_choice.write().add_block(&block, &block_root)?;
+
+        // If the block's parent was the canonical head, automatically update the canonical head.
+        //
+        // TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be
+        // run instead.
+        if self.head().beacon_block_root == parent_block_root {
+            self.update_canonical_head(
+                block.clone(),
+                block_root.clone(),
+                state.clone(),
+                state_root,
+            );
+            // Update the local state variable.
+            *self.state.write() = state.clone();
+        }
+
+        Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed))
+    }
+
+    /// Produce a new block at the present slot.
+    ///
+    /// The produced block will not be inherently valid; it must be signed by a block producer.
+    /// Block signing is out of the scope of this function and should be done by a separate program.
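+    ///
+    /// A sketch of the produce-then-sign flow (`my_signer` is hypothetical and lives outside
+    /// this crate; the example is marked `ignore` as it is illustrative only):
+    ///
+    /// ```ignore
+    /// let (mut block, _state) = chain.produce_block(randao_reveal).expect("block production");
+    /// block.signature = my_signer.sign(&block); // hypothetical signing step
+    /// chain.process_block(block)?;
+    /// ```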
+    pub fn produce_block(&self, randao_reveal: Signature) -> Option<(BeaconBlock, BeaconState)> {
+        debug!("Producing block at slot {}...", self.state.read().slot);
+
+        let mut state = self.state.read().clone();
+
+        trace!("Finding attestations for new block...");
+
+        let attestations = self
+            .attestation_aggregator
+            .read()
+            .get_attestations_for_state(&state, &self.spec);
+
+        trace!(
+            "Inserting {} attestation(s) into new block.",
+            attestations.len()
+        );
+
+        let parent_root = *state.get_block_root(state.slot.saturating_sub(1_u64), &self.spec)?;
+
+        let mut block = BeaconBlock {
+            slot: state.slot,
+            parent_root,
+            state_root: Hash256::zero(), // Updated after the state is calculated.
+            randao_reveal,
+            eth1_data: Eth1Data {
+                // TODO: replace with real data
+                deposit_root: Hash256::zero(),
+                block_hash: Hash256::zero(),
+            },
+            signature: self.spec.empty_signature.clone(), // To be completed by a validator.
+            body: BeaconBlockBody {
+                proposer_slashings: vec![],
+                attester_slashings: vec![],
+                attestations,
+                deposits: vec![],
+                exits: vec![],
+            },
+        };
+
+        state
+            .per_block_processing_without_verifying_block_signature(&block, &self.spec)
+            .ok()?;
+
+        let state_root = state.canonical_root();
+
+        block.state_root = state_root;
+
+        trace!("Block produced.");
+
+        Some((block, state))
+    }
+
+    // TODO: Left this as is, modify later
+    pub fn fork_choice(&self) -> Result<(), Error> {
+        let present_head = self.finalized_head().beacon_block_root;
+
+        let new_head = self.fork_choice.write().find_head(&present_head)?;
+
+        if new_head != present_head {
+            let block = self
+                .block_store
+                .get_deserialized(&new_head)?
+                .ok_or_else(|| Error::MissingBeaconBlock(new_head))?;
+            let block_root = block.canonical_root();
+
+            let state = self
+                .state_store
+                .get_deserialized(&block.state_root)?
+                .ok_or_else(|| Error::MissingBeaconState(block.state_root))?;
+            let state_root = state.canonical_root();
+
+            self.update_canonical_head(block, block_root, state, state_root);
+        }
+
+        Ok(())
+    }
+}
+
+impl From<DBError> for Error {
+    fn from(e: DBError) -> Error {
+        Error::DBError(e.message)
+    }
+}
+
+impl From<ForkChoiceError> for Error {
+    fn from(e: ForkChoiceError) -> Error {
+        Error::ForkChoiceError(e)
+    }
+}
+
+impl From<CommitteesError> for Error {
+    fn from(e: CommitteesError) -> Error {
+        Error::CommitteesError(e)
+    }
+}
diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs
new file mode 100644
index 000000000..bef97d2ed
--- /dev/null
+++ b/beacon_node/beacon_chain/src/checkpoint.rs
@@ -0,0 +1,43 @@
+use serde_derive::Serialize;
+use types::{BeaconBlock, BeaconState, Hash256};
+
+/// Represents some block and its associated state. Generally, this will be used for tracking the
+/// head, justified head and finalized head.
+#[derive(PartialEq, Clone, Serialize)]
+pub struct CheckPoint {
+    pub beacon_block: BeaconBlock,
+    pub beacon_block_root: Hash256,
+    pub beacon_state: BeaconState,
+    pub beacon_state_root: Hash256,
+}
+
+impl CheckPoint {
+    /// Create a new checkpoint.
+    pub fn new(
+        beacon_block: BeaconBlock,
+        beacon_block_root: Hash256,
+        beacon_state: BeaconState,
+        beacon_state_root: Hash256,
+    ) -> Self {
+        Self {
+            beacon_block,
+            beacon_block_root,
+            beacon_state,
+            beacon_state_root,
+        }
+    }
+
+    /// Update all fields of the checkpoint.
+    pub fn update(
+        &mut self,
+        beacon_block: BeaconBlock,
+        beacon_block_root: Hash256,
+        beacon_state: BeaconState,
+        beacon_state_root: Hash256,
+    ) {
+        self.beacon_block = beacon_block;
+        self.beacon_block_root = beacon_block_root;
+        self.beacon_state = beacon_state;
+        self.beacon_state_root = beacon_state_root;
+    }
+}
diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs
new file mode 100644
index 000000000..4dac0b672
--- /dev/null
+++ b/beacon_node/beacon_chain/src/lib.rs
@@ -0,0 +1,7 @@
+mod attestation_aggregator;
+mod beacon_chain;
+mod checkpoint;
+
+pub use self::beacon_chain::{BeaconChain, Error};
+pub use self::checkpoint::CheckPoint;
+pub use fork_choice::{ForkChoice, ForkChoiceAlgorithms, ForkChoiceError};
diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml
new file mode 100644
index 000000000..bb335c152
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "test_harness"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[[bench]]
+name = "state_transition"
+harness = false
+
+[dev-dependencies]
+criterion = "0.2"
+
+[dependencies]
+attester = { path = "../../../eth2/attester" }
+beacon_chain = { path = "../../beacon_chain" }
+block_producer = { path = "../../../eth2/block_producer" }
+bls = { path = "../../../eth2/utils/bls" }
+boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
+db = { path = "../../db" }
+parking_lot = "0.7"
+failure = "0.1"
+failure_derive = "0.1"
+fork_choice = { path = "../../../eth2/fork_choice" }
+hashing = { path = "../../../eth2/utils/hashing" }
+log = "0.4"
+env_logger = "0.6.0"
+rayon = "1.0"
+serde = "1.0"
+serde_derive = "1.0"
+serde_json = "1.0"
+slot_clock = { path = "../../../eth2/utils/slot_clock" }
+ssz = { path = "../../../eth2/utils/ssz" }
+types = { path = "../../../eth2/types" }
diff --git a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs
new file mode 100644
index 000000000..013ecfd1e
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs
@@ -0,0 +1,68 @@
+use criterion::Criterion;
+use criterion::{black_box, criterion_group, criterion_main, Benchmark};
+// use env_logger::{Builder, Env};
+use test_harness::BeaconChainHarness;
+use types::{ChainSpec, Hash256};
+
+fn mid_epoch_state_transition(c: &mut Criterion) {
+    // Builder::from_env(Env::default().default_filter_or("debug")).init();
+
+    let validator_count = 1000;
+    let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
+
+    let epoch_depth = (rig.spec.epoch_length * 2) + (rig.spec.epoch_length / 2);
+
+    for _ in 0..epoch_depth {
+        rig.advance_chain_with_block();
+    }
+
+    let state = rig.beacon_chain.state.read().clone();
+
+    assert!((state.slot + 1) % rig.spec.epoch_length != 0);
+
+    c.bench_function("mid-epoch state transition 1k validators", move |b| {
+        let state = state.clone();
+        b.iter(|| {
+            let mut state = state.clone();
+            black_box(state.per_slot_processing(Hash256::zero(), &rig.spec))
+        })
+    });
+}
+
+fn epoch_boundary_state_transition(c: &mut Criterion) {
+    // Builder::from_env(Env::default().default_filter_or("debug")).init();
+
+    let validator_count = 10000;
+    let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
+
+    let epoch_depth = rig.spec.epoch_length * 2;
+
+    for _ in 0..(epoch_depth - 1) {
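+        // Stop one slot short of the epoch boundary, so the transition measured below is the
+        // one that crosses it (see the `assert_eq!` that follows this loop).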
+        rig.advance_chain_with_block();
+    }
+
+    let state = rig.beacon_chain.state.read().clone();
+
+    assert_eq!((state.slot + 1) % rig.spec.epoch_length, 0);
+
+    c.bench(
+        "routines",
+        Benchmark::new("routine_1", move |b| {
+            let state = state.clone();
+            b.iter(|| {
+                let mut state = state.clone();
+                black_box(black_box(
+                    state.per_slot_processing(Hash256::zero(), &rig.spec),
+                ))
+            })
+        })
+        .sample_size(5), // Sample size is low because the function is sloooow.
+    );
+}
+
+criterion_group!(
+    benches,
+    mid_epoch_state_transition,
+    epoch_boundary_state_transition
+);
+criterion_main!(benches);
diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
new file mode 100644
index 000000000..09621268c
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
@@ -0,0 +1,245 @@
+use super::ValidatorHarness;
+use beacon_chain::BeaconChain;
+pub use beacon_chain::{CheckPoint, Error as BeaconChainError};
+use bls::create_proof_of_possession;
+use db::{
+    stores::{BeaconBlockStore, BeaconStateStore},
+    MemoryDB,
+};
+use fork_choice::{optimised_lmd_ghost::OptimisedLMDGhost, slow_lmd_ghost::SlowLMDGhost}; // import all the algorithms
+use log::debug;
+use rayon::prelude::*;
+use slot_clock::TestingSlotClock;
+use std::collections::HashSet;
+use std::fs::File;
+use std::io::prelude::*;
+use std::iter::FromIterator;
+use std::sync::Arc;
+use types::{
+    BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, FreeAttestation, Hash256,
+    Keypair, Slot,
+};
+
+/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
+/// to it. Each validator is provided a borrow of the beacon chain, where it may read
+/// information and submit blocks/attestations for processing.
+///
+/// This test harness is useful for testing validator and internal state transition logic. It
+/// is not useful for testing that multiple beacon nodes can reach consensus.
+pub struct BeaconChainHarness {
+    pub db: Arc<MemoryDB>,
+    pub beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>,
+    pub block_store: Arc<BeaconBlockStore<MemoryDB>>,
+    pub state_store: Arc<BeaconStateStore<MemoryDB>>,
+    pub validators: Vec<ValidatorHarness>,
+    pub spec: Arc<ChainSpec>,
+}
+
+impl BeaconChainHarness {
+    /// Create a new harness with:
+    ///
+    /// - A keypair, `BlockProducer` and `Attester` for each validator.
+    /// - A new BeaconChain struct where the given validators are in the genesis.
+    pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
+        let db = Arc::new(MemoryDB::open());
+        let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
+        let state_store = Arc::new(BeaconStateStore::new(db.clone()));
+        let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past).
+        let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
+        let fork_choice = OptimisedLMDGhost::new(block_store.clone(), state_store.clone());
+        let latest_eth1_data = Eth1Data {
+            deposit_root: Hash256::zero(),
+            block_hash: Hash256::zero(),
+        };
+
+        debug!("Generating validator keypairs...");
+
+        let keypairs: Vec<Keypair> = (0..validator_count)
+            .collect::<Vec<usize>>()
+            .par_iter()
+            .map(|_| Keypair::random())
+            .collect();
+
+        debug!("Creating validator deposits...");
+
+        let initial_validator_deposits = keypairs
+            .par_iter()
+            .map(|keypair| Deposit {
+                branch: vec![], // branch verification is not specified.
+                index: 0,       // index verification is not specified.
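+                // The deposit data below commits the keypair with a full 32 ETH deposit
+                // (denominated in Gwei), enough to activate the validator at genesis.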
+                deposit_data: DepositData {
+                    amount: 32_000_000_000, // 32 ETH (in Gwei)
+                    timestamp: genesis_time - 1,
+                    deposit_input: DepositInput {
+                        pubkey: keypair.pk.clone(),
+                        withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
+                        proof_of_possession: create_proof_of_possession(&keypair),
+                    },
+                },
+            })
+            .collect();
+
+        debug!("Creating the BeaconChain...");
+
+        // Create the Beacon Chain
+        let beacon_chain = Arc::new(
+            BeaconChain::genesis(
+                state_store.clone(),
+                block_store.clone(),
+                slot_clock,
+                genesis_time,
+                latest_eth1_data,
+                initial_validator_deposits,
+                spec.clone(),
+                fork_choice,
+            )
+            .unwrap(),
+        );
+
+        let spec = Arc::new(spec);
+
+        debug!("Creating validator producer and attester instances...");
+
+        // Spawn the test validator instances.
+        let validators: Vec<ValidatorHarness> = keypairs
+            .iter()
+            .map(|keypair| {
+                ValidatorHarness::new(keypair.clone(), beacon_chain.clone(), spec.clone())
+            })
+            .collect();
+
+        debug!("Created {} ValidatorHarnesses", validators.len());
+
+        Self {
+            db,
+            beacon_chain,
+            block_store,
+            state_store,
+            validators,
+            spec,
+        }
+    }
+
+    /// Move the `slot_clock` for the `BeaconChain` forward one slot.
+    ///
+    /// This is the equivalent of advancing a system clock forward one `SLOT_DURATION`.
+    ///
+    /// Returns the new slot.
+    pub fn increment_beacon_chain_slot(&mut self) -> Slot {
+        let slot = self.beacon_chain.present_slot() + 1;
+
+        debug!("Incrementing BeaconChain slot to {}.", slot);
+
+        self.beacon_chain.slot_clock.set_slot(slot.as_u64());
+        self.beacon_chain.advance_state(slot).unwrap();
+        slot
+    }
+
+    /// Gather the `FreeAttestation`s from the validators.
+    ///
+    /// Note: validators will only produce attestations _once per slot_. So, if you call this twice
+    /// you'll only get attestations on the first run.
+    pub fn gather_free_attesations(&mut self) -> Vec<FreeAttestation> {
+        let present_slot = self.beacon_chain.present_slot();
+
+        let attesting_validators = self
+            .beacon_chain
+            .state
+            .read()
+            .get_crosslink_committees_at_slot(present_slot, false, &self.spec)
+            .unwrap()
+            .iter()
+            .fold(vec![], |mut acc, (committee, _slot)| {
+                acc.append(&mut committee.clone());
+                acc
+            });
+        let attesting_validators: HashSet<usize> =
+            HashSet::from_iter(attesting_validators.iter().cloned());
+
+        let free_attestations: Vec<FreeAttestation> = self
+            .validators
+            .par_iter_mut()
+            .enumerate()
+            .filter_map(|(i, validator)| {
+                if attesting_validators.contains(&i) {
+                    // Advance the validator slot.
+                    validator.set_slot(present_slot);
+
+                    // Prompt the validator to produce an attestation (if required).
+                    validator.produce_free_attestation().ok()
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        debug!(
+            "Gathered {} FreeAttestations for slot {}.",
+            free_attestations.len(),
+            present_slot
+        );
+
+        free_attestations
+    }
+
+    /// Get the block from the proposer for the slot.
+    ///
+    /// Note: the validator will only produce it _once per slot_. So, if you call this twice you'll
+    /// only get a block once.
+    pub fn produce_block(&mut self) -> BeaconBlock {
+        let present_slot = self.beacon_chain.present_slot();
+
+        let proposer = self.beacon_chain.block_proposer(present_slot).unwrap();
+
+        debug!(
+            "Producing block from validator #{} for slot {}.",
+            proposer, present_slot
+        );
+
+        // Ensure the validator's slot clock is accurate.
+        self.validators[proposer].set_slot(present_slot);
+        self.validators[proposer].produce_block().unwrap()
+    }
+
+    /// Advances the chain with a BeaconBlock and attestations from all validators.
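+    /// Each call increments the slot, requests a block from the scheduled proposer, processes
+    /// it, and then processes a `FreeAttestation` from every validator scheduled to attest.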
+    ///
+    /// This is the ideal scenario for the Beacon Chain: 100% honest participation from
+    /// validators.
+    pub fn advance_chain_with_block(&mut self) {
+        self.increment_beacon_chain_slot();
+
+        // Produce a new block.
+        let block = self.produce_block();
+        debug!("Submitting block for processing...");
+        self.beacon_chain.process_block(block).unwrap();
+        debug!("...block processed by BeaconChain.");
+
+        debug!("Producing free attestations...");
+
+        // Produce new attestations.
+        let free_attestations = self.gather_free_attesations();
+
+        debug!("Processing free attestations...");
+
+        free_attestations.par_iter().for_each(|free_attestation| {
+            self.beacon_chain
+                .process_free_attestation(free_attestation.clone())
+                .unwrap();
+        });
+
+        debug!("Free attestations processed.");
+    }
+
+    /// Dump all blocks and states from the canonical beacon chain.
+    pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
+        self.beacon_chain.chain_dump()
+    }
+
+    /// Write the output of `chain_dump` to a JSON file.
+    pub fn dump_to_file(&self, filename: String, chain_dump: &[CheckPoint]) {
+        let json = serde_json::to_string(chain_dump).unwrap();
+        let mut file = File::create(filename).unwrap();
+        file.write_all(json.as_bytes())
+            .expect("Failed writing dump to file.");
+    }
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs
new file mode 100644
index 000000000..b04fc6996
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/lib.rs
@@ -0,0 +1,5 @@
+mod beacon_chain_harness;
+mod validator_harness;
+
+pub use self::beacon_chain_harness::BeaconChainHarness;
+pub use self::validator_harness::ValidatorHarness;
diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs
new file mode 100644
index 000000000..be71b9abd
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs
@@ -0,0 +1,108 @@
+use attester::{
+    BeaconNode as AttesterBeaconNode, BeaconNodeError as NodeError,
+    PublishOutcome as AttestationPublishOutcome,
+};
+use beacon_chain::BeaconChain;
+use block_producer::{
+    BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError,
+    PublishOutcome as BlockPublishOutcome,
+};
+use db::ClientDB;
+use fork_choice::ForkChoice;
+use parking_lot::RwLock;
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
+
+// mod attester;
+// mod producer;
+
+/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
+/// blocks/attestations.
+///
+/// `BeaconBlock`s and `FreeAttestation`s are not actually published to the `BeaconChain`; instead
+/// they are stored inside this struct. This is to allow one to benchmark the submission of the
+/// block/attestation directly, or modify it before submission.
+pub struct DirectBeaconNode<T: ClientDB, U: SlotClock, F: ForkChoice> {
+    beacon_chain: Arc<BeaconChain<T, U, F>>,
+    published_blocks: RwLock<Vec<BeaconBlock>>,
+    published_attestations: RwLock<Vec<FreeAttestation>>,
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectBeaconNode<T, U, F> {
+    pub fn new(beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
+        Self {
+            beacon_chain,
+            published_blocks: RwLock::new(vec![]),
+            published_attestations: RwLock::new(vec![]),
+        }
+    }
+
+    /// Get the last published block (if any).
+    pub fn last_published_block(&self) -> Option<BeaconBlock> {
+        Some(self.published_blocks.read().last()?.clone())
+    }
+
+    /// Get the last published attestation (if any).
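+    /// Like `last_published_block`, this reads from the local store; nothing is forwarded to
+    /// the `BeaconChain` itself.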
+    pub fn last_published_free_attestation(&self) -> Option<FreeAttestation> {
+        Some(self.published_attestations.read().last()?.clone())
+    }
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> {
+    fn produce_attestation_data(
+        &self,
+        _slot: Slot,
+        shard: u64,
+    ) -> Result<Option<AttestationData>, NodeError> {
+        match self.beacon_chain.produce_attestation_data(shard) {
+            Ok(attestation_data) => Ok(Some(attestation_data)),
+            Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))),
+        }
+    }
+
+    fn publish_attestation_data(
+        &self,
+        free_attestation: FreeAttestation,
+    ) -> Result<AttestationPublishOutcome, NodeError> {
+        self.published_attestations.write().push(free_attestation);
+        Ok(AttestationPublishOutcome::ValidAttestation)
+    }
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconNode<T, U, F> {
+    /// Requests a new `BeaconBlock` from the `BeaconChain`.
+    fn produce_beacon_block(
+        &self,
+        slot: Slot,
+        randao_reveal: &Signature,
+    ) -> Result<Option<BeaconBlock>, BeaconBlockNodeError> {
+        let (block, _state) = self
+            .beacon_chain
+            .produce_block(randao_reveal.clone())
+            .ok_or_else(|| {
+                BeaconBlockNodeError::RemoteFailure("Did not produce block.".to_string())
+            })?;
+
+        if block.slot == slot {
+            Ok(Some(block))
+        } else {
+            Err(BeaconBlockNodeError::RemoteFailure(
+                "Unable to produce at non-current slot.".to_string(),
+            ))
+        }
+    }
+
+    /// A block is not _actually_ published to the `BeaconChain`; instead it is stored in the
+    /// `published_blocks` vec and a successful `ValidBlock` is returned to the caller.
+    ///
+    /// The block may be retrieved and then applied to the `BeaconChain` manually, potentially in a
+    /// benchmarking scenario.
+    fn publish_beacon_block(
+        &self,
+        block: BeaconBlock,
+    ) -> Result<BlockPublishOutcome, BeaconBlockNodeError> {
+        self.published_blocks.write().push(block);
+        Ok(BlockPublishOutcome::ValidBlock)
+    }
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs
new file mode 100644
index 000000000..66b9d650c
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_duties.rs
@@ -0,0 +1,70 @@
+use attester::{
+    DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError,
+};
+use beacon_chain::BeaconChain;
+use block_producer::{
+    DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError,
+};
+use db::ClientDB;
+use fork_choice::ForkChoice;
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use types::{PublicKey, Slot};
+
+/// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
+/// it.
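+///
+/// A hedged construction sketch (it mirrors how the test harness wires this struct
+/// up in `mod.rs`; not compiled as a doctest):
+///
+/// ```ignore
+/// let duties = DirectDuties::new(keypair.pk.clone(), beacon_chain.clone());
+/// let proposing = duties.is_block_production_slot(Slot::new(1)).unwrap();
+/// ```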
+pub struct DirectDuties<T: ClientDB, U: SlotClock, F: ForkChoice> {
+    beacon_chain: Arc<BeaconChain<T, U, F>>,
+    pubkey: PublicKey,
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectDuties<T, U, F> {
+    pub fn new(pubkey: PublicKey, beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
+        Self {
+            beacon_chain,
+            pubkey,
+        }
+    }
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDuties<T, U, F> {
+    fn is_block_production_slot(&self, slot: Slot) -> Result<bool, ProducerDutiesReaderError> {
+        let validator_index = self
+            .beacon_chain
+            .validator_index(&self.pubkey)
+            .ok_or_else(|| ProducerDutiesReaderError::UnknownValidator)?;
+
+        match self.beacon_chain.block_proposer(slot) {
+            Ok(proposer) if proposer == validator_index => Ok(true),
+            Ok(_) => Ok(false),
+            Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
+        }
+    }
+}
+
+impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {
+    fn validator_index(&self) -> Option<u64> {
+        match self.beacon_chain.validator_index(&self.pubkey) {
+            Some(index) => Some(index as u64),
+            None => None,
+        }
+    }
+
+    fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, AttesterDutiesReaderError> {
+        if let Some(validator_index) = self.validator_index() {
+            match self
+                .beacon_chain
+                .validator_attestion_slot_and_shard(validator_index as usize)
+            {
+                Ok(Some((attest_slot, attest_shard))) if attest_slot == slot => {
+                    Ok(Some(attest_shard))
+                }
+                Ok(Some(_)) => Ok(None),
+                Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch),
+                Err(_) => unreachable!("Error when getting validator attestation shard."),
+            }
+        } else {
+            Err(AttesterDutiesReaderError::UnknownValidator)
+        }
+    }
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs
new file mode 100644
index 000000000..8e901b057
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/local_signer.rs
@@ -0,0 +1,47 @@
+use attester::Signer as AttesterSigner;
+use block_producer::Signer as BlockProposerSigner;
+use std::sync::RwLock;
+use types::{Keypair, Signature};
+
+/// A test-only struct used to perform signing for a proposer or attester.
+pub struct LocalSigner {
+    keypair: Keypair,
+    should_sign: RwLock<bool>,
+}
+
+impl LocalSigner {
+    /// Produce a new `LocalSigner` with signing enabled by default.
+    pub fn new(keypair: Keypair) -> Self {
+        Self {
+            keypair,
+            should_sign: RwLock::new(true),
+        }
+    }
+
+    /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages
+    /// will be signed.
+    pub fn enable_signing(&self, enabled: bool) {
+        *self.should_sign.write().unwrap() = enabled;
+    }
+
+    /// Sign some message.
+    fn bls_sign(&self, message: &[u8]) -> Option<Signature> {
+        // Refuse to sign if signing has been disabled via `enable_signing`.
+        if *self.should_sign.read().unwrap() {
+            Some(Signature::new(message, &self.keypair.sk))
+        } else {
+            None
+        }
+    }
+}
+
+impl BlockProposerSigner for LocalSigner {
+    fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature> {
+        self.bls_sign(message)
+    }
+
+    fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature> {
+        self.bls_sign(message)
+    }
+}
+
+impl AttesterSigner for LocalSigner {
+    fn sign_attestation_message(&self, message: &[u8]) -> Option<Signature> {
+        self.bls_sign(message)
+    }
+}
diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs
new file mode 100644
index 000000000..e22ea1a2e
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/src/validator_harness/mod.rs
@@ -0,0 +1,137 @@
+mod direct_beacon_node;
+mod direct_duties;
+mod local_signer;
+
+use attester::PollOutcome as AttestationPollOutcome;
+use attester::{Attester, Error as AttestationPollError};
+use beacon_chain::BeaconChain;
+use block_producer::PollOutcome as BlockPollOutcome;
+use block_producer::{BlockProducer, Error as BlockPollError};
+use db::MemoryDB;
+use direct_beacon_node::DirectBeaconNode;
+use direct_duties::DirectDuties;
+use fork_choice::optimised_lmd_ghost::OptimisedLMDGhost;
+use local_signer::LocalSigner;
+use slot_clock::TestingSlotClock;
+use std::sync::Arc;
+use types::{BeaconBlock, ChainSpec, FreeAttestation, Keypair, Slot};
+
+#[derive(Debug, PartialEq)]
+pub enum BlockProduceError {
+    DidNotProduce(BlockPollOutcome),
+    PollError(BlockPollError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum AttestationProduceError {
+    DidNotProduce(AttestationPollOutcome),
+    PollError(AttestationPollError),
+}
+
+/// A `BlockProducer` and `Attester` which sign using a common keypair.
+///
+/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
+/// testing that the core proposer and attester logic is functioning. Also for supporting beacon
+/// chain tests.
+pub struct ValidatorHarness {
+    pub block_producer: BlockProducer<
+        TestingSlotClock,
+        DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>,
+        DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>,
+        LocalSigner,
+    >,
+    pub attester: Attester<
+        TestingSlotClock,
+        DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>,
+        DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>,
+        LocalSigner,
+    >,
+    pub spec: Arc<ChainSpec>,
+    pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>,
+    pub keypair: Keypair,
+    pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>,
+    pub slot_clock: Arc<TestingSlotClock>,
+    pub signer: Arc<LocalSigner>,
+}
+
+impl ValidatorHarness {
+    /// Create a new ValidatorHarness that signs with the given keypair, operates per the given
+    /// spec and connects to the supplied beacon node.
+    ///
+    /// A `BlockProducer` and `Attester` are created.
+    pub fn new(
+        keypair: Keypair,
+        beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, OptimisedLMDGhost<MemoryDB>>>,
+        spec: Arc<ChainSpec>,
+    ) -> Self {
+        let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));
+        let signer = Arc::new(LocalSigner::new(keypair.clone()));
+        let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone()));
+        let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone()));
+
+        let block_producer = BlockProducer::new(
+            spec.clone(),
+            epoch_map.clone(),
+            slot_clock.clone(),
+            beacon_node.clone(),
+            signer.clone(),
+        );
+
+        let attester = Attester::new(
+            epoch_map.clone(),
+            slot_clock.clone(),
+            beacon_node.clone(),
+            signer.clone(),
+        );
+
+        Self {
+            block_producer,
+            attester,
+            spec,
+            epoch_map,
+            keypair,
+            beacon_node,
+            slot_clock,
+            signer,
+        }
+    }
+
+    /// Run the `poll` function on the `BlockProducer` and produce a block.
+    ///
+    /// An error is returned if the producer refuses to produce.
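+    ///
+    /// A hedged sketch of the expected call pattern (assuming a validator created by
+    /// the harness; not compiled as a doctest):
+    ///
+    /// ```ignore
+    /// validator.set_slot(Slot::new(2));
+    /// let block = validator.produce_block().expect("producer should produce");
+    /// ```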
+    pub fn produce_block(&mut self) -> Result<BeaconBlock, BlockProduceError> {
+        // Using `DirectBeaconNode`, the validator will always return successfully if it tries to
+        // publish a block.
+        match self.block_producer.poll() {
+            Ok(BlockPollOutcome::BlockProduced(_)) => {}
+            Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)),
+            Err(error) => return Err(BlockProduceError::PollError(error)),
+        };
+        Ok(self
+            .beacon_node
+            .last_published_block()
+            .expect("Unable to obtain produced block."))
+    }
+
+    /// Run the `poll` function on the `Attester` and produce a `FreeAttestation`.
+    ///
+    /// An error is returned if the attester refuses to attest.
+    pub fn produce_free_attestation(&mut self) -> Result<FreeAttestation, AttestationProduceError> {
+        match self.attester.poll() {
+            Ok(AttestationPollOutcome::AttestationProduced(_)) => {}
+            Ok(outcome) => return Err(AttestationProduceError::DidNotProduce(outcome)),
+            Err(error) => return Err(AttestationProduceError::PollError(error)),
+        };
+        Ok(self
+            .beacon_node
+            .last_published_free_attestation()
+            .expect("Unable to obtain produced attestation."))
+    }
+
+    /// Set the validator's slot clock to the specified slot.
+    ///
+    /// The validator's slot clock will always read this value until it is set to something else.
+    pub fn set_slot(&mut self, slot: Slot) {
+        self.slot_clock.set_slot(slot.as_u64())
+    }
+}
diff --git a/beacon_node/beacon_chain/test_harness/tests/chain.rs b/beacon_node/beacon_chain/test_harness/tests/chain.rs
new file mode 100644
index 000000000..8be6f2a26
--- /dev/null
+++ b/beacon_node/beacon_chain/test_harness/tests/chain.rs
@@ -0,0 +1,47 @@
+use env_logger::{Builder, Env};
+use log::debug;
+use test_harness::BeaconChainHarness;
+use types::{ChainSpec, Slot};
+
+#[test]
+#[ignore]
+fn it_can_build_on_genesis_block() {
+    let mut spec = ChainSpec::foundation();
+    spec.genesis_slot = Slot::new(spec.epoch_length * 8);
+
+    /*
+    spec.shard_count = spec.shard_count / 8;
+    spec.target_committee_size = spec.target_committee_size / 8;
+    */
+    let validator_count = 1000;
+
+    let mut harness = BeaconChainHarness::new(spec, validator_count as usize);
+
+    harness.advance_chain_with_block();
+}
+
+#[test]
+#[ignore]
+fn it_can_produce_past_first_epoch_boundary() {
+    Builder::from_env(Env::default().default_filter_or("debug")).init();
+
+    let validator_count = 100;
+
+    debug!("Starting harness build...");
+
+    let mut harness = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
+
+    debug!("Harness built, tests starting...");
+
+    let blocks = harness.spec.epoch_length * 3 + 1;
+
+    for i in 0..blocks {
+        harness.advance_chain_with_block();
+        debug!("Produced block {}/{}.", i, blocks);
+    }
+    let dump = harness.chain_dump().expect("Chain dump failed.");
+
+    assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.
+
+    harness.dump_to_file("/tmp/chaindump.json".to_string(), &dump);
+}
diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml
new file mode 100644
index 000000000..122aaa34d
--- /dev/null
+++ b/beacon_node/db/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "db"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+blake2-rfc = "0.2.18"
+bls = { path = "../../eth2/utils/bls" }
+bytes = "0.4.10"
+rocksdb = "0.10.1"
+ssz = { path = "../../eth2/utils/ssz" }
+types = { path = "../../eth2/types" }
diff --git a/beacon_node/db/src/disk_db.rs b/beacon_node/db/src/disk_db.rs
new file mode 100644
index 000000000..9d8a71bc4
--- /dev/null
+++ b/beacon_node/db/src/disk_db.rs
@@ -0,0 +1,197 @@
+extern crate rocksdb;
+
+use super::rocksdb::Error as RocksError;
+use super::rocksdb::{Options, DB};
+use super::{ClientDB, DBError, DBValue};
+use std::fs;
+use std::path::Path;
+
+/// An on-disk database which implements the ClientDB trait.
+///
+/// This implementation uses RocksDB with default options.
+pub struct DiskDB {
+    db: DB,
+}
+
+impl DiskDB {
+    /// Open the RocksDB database, optionally supplying columns if required.
+    ///
+    /// The RocksDB database will be contained in a directory titled
+    /// "database" in the supplied path.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the database is unable to be created.
+    pub fn open(path: &Path, columns: Option<&[&str]>) -> Self {
+        /*
+         * Initialise the options
+         */
+        let mut options = Options::default();
+        options.create_if_missing(true);
+
+        // TODO: ensure that columns are created (and remove
+        // the dead_code allow)
+
+        /*
+         * Initialise the path
+         */
+        fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path));
+        let db_path = path.join("database");
+
+        /*
+         * Open the database
+         */
+        let db = match columns {
+            None => DB::open(&options, db_path),
+            Some(columns) => DB::open_cf(&options, db_path, columns),
+        }
+        .expect("Unable to open local database");
+
+        Self { db }
+    }
+
+    /// Create a RocksDB column family. Corresponds to the
+    /// `create_cf()` function on the RocksDB API.
+    #[allow(dead_code)]
+    fn create_col(&mut self, col: &str) -> Result<(), DBError> {
+        match self.db.create_cf(col, &Options::default()) {
+            Err(e) => Err(e.into()),
+            Ok(_) => Ok(()),
+        }
+    }
+}
+
+impl From<RocksError> for DBError {
+    fn from(e: RocksError) -> Self {
+        Self {
+            message: e.to_string(),
+        }
+    }
+}
+
+impl ClientDB for DiskDB {
+    /// Get the value for some key on some column.
+    ///
+    /// Corresponds to the `get_cf()` method on the RocksDB API.
+    /// Will attempt to get the `ColumnFamily` and return an Err
+    /// if it fails.
+    fn get(&self, col: &str, key: &[u8]) -> Result<Option<DBValue>, DBError> {
+        match self.db.cf_handle(col) {
+            None => Err(DBError {
+                message: "Unknown column".to_string(),
+            }),
+            Some(handle) => match self.db.get_cf(handle, key)? {
+                None => Ok(None),
+                Some(db_vec) => Ok(Some(DBValue::from(&*db_vec))),
+            },
+        }
+    }
+
+    /// Set some value for some key on some column.
+    ///
+    /// Corresponds to the `put_cf()` method on the RocksDB API.
+    /// Will attempt to get the `ColumnFamily` and return an Err
+    /// if it fails.
+    fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> {
+        match self.db.cf_handle(col) {
+            None => Err(DBError {
+                message: "Unknown column".to_string(),
+            }),
+            Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()),
+        }
+    }
+
+    /// Return true if some key exists in some column.
+    fn exists(&self, col: &str, key: &[u8]) -> Result<bool, DBError> {
+        /*
+         * I'm not sure if this is the correct way to read if some
+         * key exists. Naively I would expect this to unnecessarily
+         * copy some data, but I could be wrong.
+         */
+        match self.db.cf_handle(col) {
+            None => Err(DBError {
+                message: "Unknown column".to_string(),
+            }),
+            Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some()),
+        }
+    }
+
+    /// Delete the value for some key on some column.
+    ///
+    /// Corresponds to the `delete_cf()` method on the RocksDB API.
+    /// Will attempt to get the `ColumnFamily` and return an Err
+    /// if it fails.
+    fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> {
+        match self.db.cf_handle(col) {
+            None => Err(DBError {
+                message: "Unknown column".to_string(),
+            }),
+            Some(handle) => {
+                self.db.delete_cf(handle, key)?;
+                Ok(())
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::ClientDB;
+    use super::*;
+    use std::sync::Arc;
+    use std::{env, fs, thread};
+
+    #[test]
+    #[ignore]
+    fn test_rocksdb_can_use_db() {
+        let pwd = env::current_dir().unwrap();
+        let path = pwd.join("testdb_please_remove");
+        let _ = fs::remove_dir_all(&path);
+        fs::create_dir_all(&path).unwrap();
+
+        let col_name: &str = "TestColumn";
+        let column_families = vec![col_name];
+
+        let mut db = DiskDB::open(&path, None);
+
+        for cf in column_families {
+            db.create_col(&cf).unwrap();
+        }
+
+        let db = Arc::new(db);
+
+        let thread_count = 10;
+        let write_count = 10;
+
+        // We're expecting the product of these numbers to fit in one byte.
+        assert!(thread_count * write_count <= 255);
+
+        let mut handles = vec![];
+        for t in 0..thread_count {
+            let wc = write_count;
+            let db = db.clone();
+            let col = col_name.clone();
+            let handle = thread::spawn(move || {
+                for w in 0..wc {
+                    let key = (t * w) as u8;
+                    let val = 42;
+                    db.put(&col, &vec![key], &vec![val]).unwrap();
+                }
+            });
+            handles.push(handle);
+        }
+
+        for handle in handles {
+            handle.join().unwrap();
+        }
+
+        for t in 0..thread_count {
+            for w in 0..write_count {
+                let key = (t * w) as u8;
+                let val = db.get(&col_name, &vec![key]).unwrap().unwrap();
+                assert_eq!(vec![42], val);
+            }
+        }
+        fs::remove_dir_all(&path).unwrap();
+    }
+}
diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs
new file mode 100644
index 000000000..a646d7d2e
--- /dev/null
+++ b/beacon_node/db/src/lib.rs
@@ -0,0 +1,14 @@
+extern crate blake2_rfc as blake2;
+extern crate bls;
+extern crate rocksdb;
+
+mod disk_db;
+mod memory_db;
+pub mod stores;
+mod traits;
+
+use self::stores::COLUMNS;
+
+pub use self::disk_db::DiskDB;
+pub use self::memory_db::MemoryDB;
+pub use self::traits::{ClientDB, DBError, DBValue};
diff --git a/beacon_node/db/src/memory_db.rs b/beacon_node/db/src/memory_db.rs
new file mode 100644
index 000000000..008e5912f
--- /dev/null
+++ b/beacon_node/db/src/memory_db.rs
@@ -0,0 +1,236 @@
+use super::blake2::blake2b::blake2b;
+use super::COLUMNS;
+use super::{ClientDB, DBError, DBValue};
+use std::collections::{HashMap, HashSet};
+use std::sync::RwLock;
+
+type DBHashMap = HashMap<Vec<u8>, Vec<u8>>;
+type ColumnHashSet = HashSet<String>;
+
+/// An in-memory database implementing the ClientDB trait.
+///
+/// It is not particularly optimized; it exists for ease and speed of testing. It's not expected
+/// this DB would be used outside of tests.
+pub struct MemoryDB {
+    db: RwLock<DBHashMap>,
+    known_columns: RwLock<ColumnHashSet>,
+}
+
+impl MemoryDB {
+    /// Open the in-memory database.
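+    ///
+    /// A hedged usage sketch (assuming the default column set defined in `stores`;
+    /// not compiled as a doctest):
+    ///
+    /// ```ignore
+    /// let db = MemoryDB::open();
+    /// db.put(BLOCKS_DB_COLUMN, b"key", b"val").unwrap();
+    /// assert_eq!(db.get(BLOCKS_DB_COLUMN, b"key").unwrap(), Some(b"val".to_vec()));
+    /// ```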
+    ///
+    /// All columns must be supplied initially; accessing a column that was not declared here
+    /// returns an error. This condition is enforced artificially to simulate RocksDB.
+    pub fn open() -> Self {
+        let db: DBHashMap = HashMap::new();
+        let mut known_columns: ColumnHashSet = HashSet::new();
+        for col in &COLUMNS {
+            known_columns.insert(col.to_string());
+        }
+        Self {
+            db: RwLock::new(db),
+            known_columns: RwLock::new(known_columns),
+        }
+    }
+
+    /// Hashes a key and a column name in order to get a unique key for the supplied column.
+    fn get_key_for_col(col: &str, key: &[u8]) -> Vec<u8> {
+        blake2b(32, col.as_bytes(), key).as_bytes().to_vec()
+    }
+}
+
+impl ClientDB for MemoryDB {
+    /// Get the value of some key from the database. Returns `None` if the key does not exist.
+    fn get(&self, col: &str, key: &[u8]) -> Result<Option<DBValue>, DBError> {
+        // Panic if the DB locks are poisoned.
+        let db = self.db.read().unwrap();
+        let known_columns = self.known_columns.read().unwrap();
+
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            Ok(db.get(&column_key).cloned())
+        } else {
+            Err(DBError {
+                message: "Unknown column".to_string(),
+            })
+        }
+    }
+
+    /// Puts a key in the database.
+    fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> {
+        // Panic if the DB locks are poisoned.
+        let mut db = self.db.write().unwrap();
+        let known_columns = self.known_columns.read().unwrap();
+
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            db.insert(column_key, val.to_vec());
+            Ok(())
+        } else {
+            Err(DBError {
+                message: "Unknown column".to_string(),
+            })
+        }
+    }
+
+    /// Return true if some key exists in some column.
+    fn exists(&self, col: &str, key: &[u8]) -> Result<bool, DBError> {
+        // Panic if the DB locks are poisoned.
+        let db = self.db.read().unwrap();
+        let known_columns = self.known_columns.read().unwrap();
+
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            Ok(db.contains_key(&column_key))
+        } else {
+            Err(DBError {
+                message: "Unknown column".to_string(),
+            })
+        }
+    }
+
+    /// Delete some key from the database.
+    fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> {
+        // Panic if the DB locks are poisoned.
+        let mut db = self.db.write().unwrap();
+        let known_columns = self.known_columns.read().unwrap();
+
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            db.remove(&column_key);
+            Ok(())
+        } else {
+            Err(DBError {
+                message: "Unknown column".to_string(),
+            })
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::stores::{BLOCKS_DB_COLUMN, VALIDATOR_DB_COLUMN};
+    use super::super::ClientDB;
+    use super::*;
+    use std::sync::Arc;
+    use std::thread;
+
+    #[test]
+    fn test_memorydb_can_delete() {
+        let col_a: &str = BLOCKS_DB_COLUMN;
+
+        let db = MemoryDB::open();
+
+        db.put(col_a, "dogs".as_bytes(), "lol".as_bytes()).unwrap();
+
+        assert_eq!(
+            db.get(col_a, "dogs".as_bytes()).unwrap().unwrap(),
+            "lol".as_bytes()
+        );
+
+        db.delete(col_a, "dogs".as_bytes()).unwrap();
+
+        assert_eq!(db.get(col_a, "dogs".as_bytes()).unwrap(), None);
+    }
+
+    #[test]
+    fn test_memorydb_column_access() {
+        let col_a: &str = BLOCKS_DB_COLUMN;
+        let col_b: &str = VALIDATOR_DB_COLUMN;
+
+        let db = MemoryDB::open();
+
+        /*
+         * Testing that if we write to the same key in different columns that
+         * there is not an overlap.
+         */
+        db.put(col_a, "same".as_bytes(), "cat".as_bytes()).unwrap();
+        db.put(col_b, "same".as_bytes(), "dog".as_bytes()).unwrap();
+
+        assert_eq!(
+            db.get(col_a, "same".as_bytes()).unwrap().unwrap(),
+            "cat".as_bytes()
+        );
+        assert_eq!(
+            db.get(col_b, "same".as_bytes()).unwrap().unwrap(),
+            "dog".as_bytes()
+        );
+    }
+
+    #[test]
+    fn test_memorydb_unknown_column_access() {
+        let col_a: &str = BLOCKS_DB_COLUMN;
+        let col_x: &str = "ColumnX";
+
+        let db = MemoryDB::open();
+
+        /*
+         * Test that we get errors when using undeclared columns
+         */
+        assert!(db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).is_ok());
+        assert!(db.put(col_x, "cats".as_bytes(), "lol".as_bytes()).is_err());
+
+        assert!(db.get(col_a, "cats".as_bytes()).is_ok());
+        assert!(db.get(col_x, "cats".as_bytes()).is_err());
+    }
+
+    #[test]
+    fn test_memorydb_exists() {
+        let col_a: &str = BLOCKS_DB_COLUMN;
+        let col_b: &str = VALIDATOR_DB_COLUMN;
+
+        let db = MemoryDB::open();
+
+        /*
+         * Testing that a key only exists in the column it was written to.
+         */
+        db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap();
+
+        assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap());
+        assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap());
+
+        assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap());
+        assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap());
+    }
+
+    #[test]
+    fn test_memorydb_threading() {
+        let col_name: &str = BLOCKS_DB_COLUMN;
+
+        let db = Arc::new(MemoryDB::open());
+
+        let thread_count = 10;
+        let write_count = 10;
+
+        // We're expecting the product of these numbers to fit in one byte.
+        assert!(thread_count * write_count <= 255);
+
+        let mut handles = vec![];
+        for t in 0..thread_count {
+            let wc = write_count;
+            let db = db.clone();
+            let col = col_name.clone();
+            let handle = thread::spawn(move || {
+                for w in 0..wc {
+                    let key = (t * w) as u8;
+                    let val = 42;
+                    db.put(&col, &vec![key], &vec![val]).unwrap();
+                }
+            });
+            handles.push(handle);
+        }
+
+        for handle in handles {
+            handle.join().unwrap();
+        }
+
+        for t in 0..thread_count {
+            for w in 0..write_count {
+                let key = (t * w) as u8;
+                let val = db.get(&col_name, &vec![key]).unwrap().unwrap();
+                assert_eq!(vec![42], val);
+            }
+        }
+    }
+}
diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs
new file mode 100644
index 000000000..8a1fc2b0d
--- /dev/null
+++ b/beacon_node/db/src/stores/beacon_block_store.rs
@@ -0,0 +1,265 @@
+use super::BLOCKS_DB_COLUMN as DB_COLUMN;
+use super::{ClientDB, DBError};
+use ssz::Decodable;
+use std::sync::Arc;
+use types::{readers::BeaconBlockReader, BeaconBlock, Hash256, Slot};
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum BeaconBlockAtSlotError {
+    UnknownBeaconBlock(Hash256),
+    InvalidBeaconBlock(Hash256),
+    DBError(String),
+}
+
+pub struct BeaconBlockStore<T>
+where
+    T: ClientDB,
+{
+    db: Arc<T>,
+}
+
+// Implements `put`, `get`, `exists` and `delete` for the store.
+impl_crud_for_store!(BeaconBlockStore, DB_COLUMN);
+
+impl<T: ClientDB> BeaconBlockStore<T> {
+    pub fn new(db: Arc<T>) -> Self {
+        Self { db }
+    }
+
+    pub fn get_deserialized(&self, hash: &Hash256) -> Result<Option<BeaconBlock>, DBError> {
+        match self.get(&hash)? {
+            None => Ok(None),
+            Some(ssz) => {
+                let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                    message: "Bad BeaconBlock SSZ.".to_string(),
+                })?;
+                Ok(Some(block))
+            }
+        }
+    }
+
+    /// Returns an object implementing `BeaconBlockReader`, or `None` (if hash not known).
+    ///
+    /// Note: Presently, this function fully deserializes a `BeaconBlock` and returns that. In the
+    /// future, it would be ideal to return an object capable of reading directly from serialized
+    /// SSZ bytes.
+    pub fn get_reader(&self, hash: &Hash256) -> Result<Option<impl BeaconBlockReader>, DBError> {
+        match self.get(&hash)? {
+            None => Ok(None),
+            Some(ssz) => {
+                let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                    message: "Bad BeaconBlock SSZ.".to_string(),
+                })?;
+                Ok(Some(block))
+            }
+        }
+    }
+
+    /// Retrieve the block at a slot given a "head_hash" and a slot.
+    ///
+    /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired
+    /// slot.
+    ///
+    /// This function will read each block down the chain until it finds a block with the given
+    /// slot number. If the slot is skipped, the function will return None.
+    ///
+    /// If a block is found, a tuple of (block_hash, serialized_block) is returned.
+    ///
+    /// Note: this function uses a loop instead of recursion as the compiler is over-strict when it
+    /// comes to recursion and the `impl Trait` pattern. See:
+    /// https://stackoverflow.com/questions/54032940/using-impl-trait-in-a-recursive-function
+    pub fn block_at_slot(
+        &self,
+        head_hash: &Hash256,
+        slot: Slot,
+    ) -> Result<Option<(Hash256, impl BeaconBlockReader)>, BeaconBlockAtSlotError> {
+        let mut current_hash = *head_hash;
+
+        loop {
+            if let Some(block_reader) = self.get_reader(&current_hash)? {
+                if block_reader.slot() == slot {
+                    break Ok(Some((current_hash, block_reader)));
+                } else if block_reader.slot() < slot {
+                    break Ok(None);
+                } else {
+                    current_hash = block_reader.parent_root();
+                }
+            } else {
+                break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash));
+            }
+        }
+    }
+}
+
+impl From<DBError> for BeaconBlockAtSlotError {
+    fn from(e: DBError) -> Self {
+        BeaconBlockAtSlotError::DBError(e.message)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::super::MemoryDB;
+    use super::*;
+
+    use std::sync::Arc;
+    use std::thread;
+
+    use ssz::ssz_encode;
+    use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use types::BeaconBlock;
+    use types::Hash256;
+
+    test_crud_for_store!(BeaconBlockStore, DB_COLUMN);
+
+    #[test]
+    fn head_hash_slot_too_low() {
+        let db = Arc::new(MemoryDB::open());
+        let bs = Arc::new(BeaconBlockStore::new(db.clone()));
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+
+        let mut block = BeaconBlock::random_for_test(&mut rng);
+        block.slot = Slot::from(10_u64);
+
+        let block_root = block.canonical_root();
+        bs.put(&block_root, &ssz_encode(&block)).unwrap();
+
+        let result = bs.block_at_slot(&block_root, Slot::from(11_u64)).unwrap();
+        assert_eq!(result, None);
+    }
+
+    #[test]
+    fn test_invalid_block_at_slot() {
+        let db = Arc::new(MemoryDB::open());
+        let store = BeaconBlockStore::new(db.clone());
+
+        let ssz = "definitely not a valid block".as_bytes();
+        let hash = &Hash256::from("some hash".as_bytes());
+
+        db.put(DB_COLUMN, hash, ssz).unwrap();
+        assert_eq!(
+            store.block_at_slot(hash, Slot::from(42_u64)),
+            Err(BeaconBlockAtSlotError::DBError(
+                "Bad BeaconBlock SSZ.".into()
+            ))
+        );
+    }
+
+    #[test]
+    fn test_unknown_block_at_slot() {
+        let db = Arc::new(MemoryDB::open());
+        let store = BeaconBlockStore::new(db.clone());
+
+        let ssz = "some bytes".as_bytes();
+        let hash = &Hash256::from("some hash".as_bytes());
+        let other_hash = &Hash256::from("another hash".as_bytes());
+
+        db.put(DB_COLUMN, hash, ssz).unwrap();
+        assert_eq!(
+            store.block_at_slot(other_hash, Slot::from(42_u64)),
+            Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash))
+        );
+    }
+
+    #[test]
+    fn test_block_store_on_memory_db() {
+        let db = Arc::new(MemoryDB::open());
+        let bs = Arc::new(BeaconBlockStore::new(db.clone()));
+
+        let thread_count = 10;
+        let write_count = 10;
+
+        // We're expecting the product of these numbers to fit in one byte.
+        assert!(thread_count * write_count <= 255);
+
+        let mut handles = vec![];
+        for t in 0..thread_count {
+            let wc = write_count;
+            let bs = bs.clone();
+            let handle = thread::spawn(move || {
+                for w in 0..wc {
+                    let key = (t * w) as u8;
+                    let val = 42;
+                    bs.put(&[key][..].into(), &vec![val]).unwrap();
+                }
+            });
+            handles.push(handle);
+        }
+
+        for handle in handles {
+            handle.join().unwrap();
+        }
+
+        for t in 0..thread_count {
+            for w in 0..write_count {
+                let key = (t * w) as u8;
+                assert!(bs.exists(&[key][..].into()).unwrap());
+                let val = bs.get(&[key][..].into()).unwrap().unwrap();
+                assert_eq!(vec![42], val);
+            }
+        }
+    }
+
+    #[test]
+    fn test_block_at_slot() {
+        let db = Arc::new(MemoryDB::open());
+        let bs = Arc::new(BeaconBlockStore::new(db.clone()));
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+
+        // Specify test block parameters.
+        let hashes = [
+            Hash256::from(&[0; 32][..]),
+            Hash256::from(&[1; 32][..]),
+            Hash256::from(&[2; 32][..]),
+            Hash256::from(&[3; 32][..]),
+            Hash256::from(&[4; 32][..]),
+        ];
+        let parent_hashes = [
+            Hash256::from(&[255; 32][..]), // Genesis block.
+            Hash256::from(&[0; 32][..]),
+            Hash256::from(&[1; 32][..]),
+            Hash256::from(&[2; 32][..]),
+            Hash256::from(&[3; 32][..]),
+        ];
+        let slots: Vec<Slot> = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect();
+
+        // Generate a vec of random blocks and store them in the DB.
+        let block_count = 5;
+        let mut blocks: Vec<BeaconBlock> = Vec::with_capacity(5);
+        for i in 0..block_count {
+            let mut block = BeaconBlock::random_for_test(&mut rng);
+
+            block.parent_root = parent_hashes[i];
+            block.slot = slots[i];
+
+            let ssz = ssz_encode(&block);
+            db.put(DB_COLUMN, &hashes[i], &ssz).unwrap();
+
+            blocks.push(block);
+        }
+
+        // Test that certain slots can be reached from certain hashes.
+        let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)];
+        for (hashes_index, slot_index) in test_cases {
+            let (matched_block_hash, reader) = bs
+                .block_at_slot(&hashes[hashes_index], slots[slot_index])
+                .unwrap()
+                .unwrap();
+            assert_eq!(matched_block_hash, hashes[slot_index]);
+            assert_eq!(reader.slot(), slots[slot_index]);
+        }
+
+        let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap();
+        assert_eq!(ssz, None);
+
+        let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap();
+        assert_eq!(ssz, None);
+
+        let bad_hash = &Hash256::from("unknown".as_bytes());
+        let ssz = bs.block_at_slot(bad_hash, Slot::new(2));
+        assert_eq!(
+            ssz,
+            Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*bad_hash))
+        );
+    }
+}
diff --git a/beacon_node/db/src/stores/beacon_state_store.rs b/beacon_node/db/src/stores/beacon_state_store.rs
new file mode 100644
index 000000000..ed22696cb
--- /dev/null
+++ b/beacon_node/db/src/stores/beacon_state_store.rs
@@ -0,0 +1,80 @@
+use super::STATES_DB_COLUMN as DB_COLUMN;
+use super::{ClientDB, DBError};
+use ssz::Decodable;
+use std::sync::Arc;
+use types::{readers::BeaconStateReader, BeaconState, Hash256};
+
+pub struct BeaconStateStore<T>
+where
+    T: ClientDB,
+{
+    db: Arc<T>,
+}
+
+// Implements `put`, `get`, `exists` and `delete` for the store.
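+// For `BeaconStateStore`, the macro expansion is equivalent to hand-writing the
+// following (a sketch of the macro defined in `stores/macros.rs`):
+//
+//     impl<T: ClientDB> BeaconStateStore<T> {
+//         pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> {
+//             self.db.put(DB_COLUMN, hash, ssz)
+//         }
+//         // ...and likewise for `get`, `exists` and `delete`.
+//     }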
+impl_crud_for_store!(BeaconStateStore, DB_COLUMN);
+
+impl<T: ClientDB> BeaconStateStore<T> {
+    pub fn new(db: Arc<T>) -> Self {
+        Self { db }
+    }
+
+    pub fn get_deserialized(&self, hash: &Hash256) -> Result<Option<BeaconState>, DBError> {
+        match self.get(&hash)? {
+            None => Ok(None),
+            Some(ssz) => {
+                let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                    message: "Bad State SSZ.".to_string(),
+                })?;
+                Ok(Some(state))
+            }
+        }
+    }
+
+    /// Returns an object implementing `BeaconStateReader`, or `None` (if hash not known).
+    ///
+    /// Note: Presently, this function fully deserializes a `BeaconState` and returns that. In the
+    /// future, it would be ideal to return an object capable of reading directly from serialized
+    /// SSZ bytes.
+    pub fn get_reader(&self, hash: &Hash256) -> Result<Option<impl BeaconStateReader>, DBError> {
+        match self.get(&hash)? {
+            None => Ok(None),
+            Some(ssz) => {
+                let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError {
+                    message: "Bad State SSZ.".to_string(),
+                })?;
+                Ok(Some(state))
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::super::MemoryDB;
+    use super::*;
+
+    use ssz::ssz_encode;
+    use std::sync::Arc;
+    use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use types::Hash256;
+
+    test_crud_for_store!(BeaconStateStore, DB_COLUMN);
+
+    #[test]
+    fn test_reader() {
+        let db = Arc::new(MemoryDB::open());
+        let store = BeaconStateStore::new(db.clone());
+
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let state = BeaconState::random_for_test(&mut rng);
+        let state_root = state.canonical_root();
+
+        store.put(&state_root, &ssz_encode(&state)).unwrap();
+
+        let reader = store.get_reader(&state_root).unwrap().unwrap();
+        let decoded = reader.into_beacon_state().unwrap();
+
+        assert_eq!(state, decoded);
+    }
+}
diff --git a/beacon_node/db/src/stores/macros.rs b/beacon_node/db/src/stores/macros.rs
new file mode 100644
index 000000000..36b8aef8e
--- /dev/null
+++ b/beacon_node/db/src/stores/macros.rs
@@ -0,0 +1,103 @@
+macro_rules! impl_crud_for_store {
+    ($store: ident, $db_column: expr) => {
+        impl<T: ClientDB> $store<T> {
+            pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> {
+                self.db.put($db_column, hash, ssz)
+            }
+
+            pub fn get(&self, hash: &Hash256) -> Result<Option<Vec<u8>>, DBError> {
+                self.db.get($db_column, hash)
+            }
+
+            pub fn exists(&self, hash: &Hash256) -> Result<bool, DBError> {
+                self.db.exists($db_column, hash)
+            }
+
+            pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> {
+                self.db.delete($db_column, hash)
+            }
+        }
+    };
+}
+
+#[allow(unused_macros)]
+macro_rules! test_crud_for_store {
+    ($store: ident, $db_column: expr) => {
+        #[test]
+        fn test_put() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+
+            store.put(hash, ssz).unwrap();
+            assert_eq!(db.get(DB_COLUMN, hash).unwrap().unwrap(), ssz);
+        }
+
+        #[test]
+        fn test_get() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+
+            db.put(DB_COLUMN, hash, ssz).unwrap();
+            assert_eq!(store.get(hash).unwrap().unwrap(), ssz);
+        }
+
+        #[test]
+        fn test_get_unknown() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+            let other_hash = &Hash256::from("another hash".as_bytes());
+
+            db.put(DB_COLUMN, other_hash, ssz).unwrap();
+            assert_eq!(store.get(hash).unwrap(), None);
+        }
+
+        #[test]
+        fn test_exists() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+
+            db.put(DB_COLUMN, hash, ssz).unwrap();
+            assert!(store.exists(hash).unwrap());
+        }
+
+        #[test]
+        fn test_block_does_not_exist() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+            let other_hash = &Hash256::from("another hash".as_bytes());
+
+            db.put(DB_COLUMN, hash, ssz).unwrap();
+            assert!(!store.exists(other_hash).unwrap());
+        }
+
+        #[test]
+        fn test_delete() {
+            let db = Arc::new(MemoryDB::open());
+            let store = $store::new(db.clone());
+
+            let ssz = "some bytes".as_bytes();
+            let hash = &Hash256::from("some hash".as_bytes());
+
+            db.put(DB_COLUMN, hash, ssz).unwrap();
+            assert!(db.exists(DB_COLUMN, hash).unwrap());
+
+            store.delete(hash).unwrap();
+            assert!(!db.exists(DB_COLUMN, hash).unwrap());
+        }
+    };
+}
diff --git a/beacon_node/db/src/stores/mod.rs b/beacon_node/db/src/stores/mod.rs
new file mode 100644
index 000000000..44de7eed1
--- /dev/null
+++ b/beacon_node/db/src/stores/mod.rs
@@ -0,0 +1,25 @@
+use super::{ClientDB, DBError};
+
+#[macro_use]
+mod macros;
+mod beacon_block_store;
+mod beacon_state_store;
+mod pow_chain_store;
+mod validator_store;
+
+pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore};
+pub use self::beacon_state_store::BeaconStateStore;
+pub use self::pow_chain_store::PoWChainStore;
+pub use self::validator_store::{ValidatorStore, ValidatorStoreError};
+
+pub const BLOCKS_DB_COLUMN: &str = "blocks";
+pub const STATES_DB_COLUMN: &str = "states";
+pub const POW_CHAIN_DB_COLUMN: &str = "powchain";
+pub const VALIDATOR_DB_COLUMN: &str = "validator";
+
+pub const COLUMNS: [&str; 4] = [
+    BLOCKS_DB_COLUMN,
+    STATES_DB_COLUMN,
+    POW_CHAIN_DB_COLUMN,
+    VALIDATOR_DB_COLUMN,
+];
diff --git a/beacon_node/db/src/stores/pow_chain_store.rs b/beacon_node/db/src/stores/pow_chain_store.rs
new file mode 100644
index 000000000..a7c77bab5
--- /dev/null
+++ b/beacon_node/db/src/stores/pow_chain_store.rs
@@ -0,0 +1,68 @@
+use super::POW_CHAIN_DB_COLUMN as DB_COLUMN;
+use super::{ClientDB, DBError};
+use std::sync::Arc;
+
+pub struct PoWChainStore<T>
+where
+    T: ClientDB,
+{
+    db: Arc<T>,
+}
+
+impl<T: ClientDB> PoWChainStore<T> {
+    pub fn new(db: Arc<T>) -> Self {
+        Self { db }
+    }
+
+    pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> {
+        self.db.put(DB_COLUMN, hash, &[0])
+    }
+
+    pub fn block_hash_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
+        self.db.exists(DB_COLUMN, hash)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate types;
+
+    use super::super::super::MemoryDB;
+    use super::*;
+
+    use self::types::Hash256;
+
+    #[test]
+    fn test_put_block_hash() {
+        let db = Arc::new(MemoryDB::open());
+        let store = PoWChainStore::new(db.clone());
+
+        let hash = &Hash256::from("some hash".as_bytes()).to_vec();
+        store.put_block_hash(hash).unwrap();
+
+        assert!(db.exists(DB_COLUMN, hash).unwrap());
+    }
+
+    #[test]
+    fn test_block_hash_exists() {
+        let db = Arc::new(MemoryDB::open());
+        let store = PoWChainStore::new(db.clone());
+
+        let hash = &Hash256::from("some hash".as_bytes()).to_vec();
+        db.put(DB_COLUMN, hash, &[0]).unwrap();
+
+        assert!(store.block_hash_exists(hash).unwrap());
+    }
+
+    #[test]
+    fn test_block_hash_does_not_exist() {
+        let db = Arc::new(MemoryDB::open());
+        let store = PoWChainStore::new(db.clone());
+
+        let hash = &Hash256::from("some hash".as_bytes()).to_vec();
+        let other_hash = &Hash256::from("another hash".as_bytes()).to_vec();
+        db.put(DB_COLUMN, hash, &[0]).unwrap();
+
+        assert!(!store.block_hash_exists(other_hash).unwrap());
+    }
+}
diff --git a/beacon_node/db/src/stores/validator_store.rs b/beacon_node/db/src/stores/validator_store.rs
new file mode 100644
index 000000000..02e90dc5c
--- /dev/null
+++ b/beacon_node/db/src/stores/validator_store.rs
@@ -0,0 +1,215 @@
+extern crate bytes;
+
+use self::bytes::{BufMut, BytesMut};
+use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
+use super::{ClientDB, DBError};
+use bls::PublicKey;
+use ssz::{ssz_encode, Decodable};
+use std::sync::Arc;
+
+#[derive(Debug, PartialEq)]
+pub enum ValidatorStoreError {
+    DBError(String),
+    DecodeError,
+}
+
+impl From<DBError> for ValidatorStoreError {
+    fn from(error: DBError) -> Self {
+        ValidatorStoreError::DBError(error.message)
+    }
+}
+
+#[derive(Debug, PartialEq)]
+enum KeyPrefixes {
+    PublicKey,
+}
+
+pub struct ValidatorStore<T>
+where
+    T: ClientDB,
+{
+    db: Arc<T>,
+}
+
+impl<T: ClientDB> ValidatorStore<T> {
+    pub fn new(db: Arc<T>) -> Self {
+        Self { db }
+    }
+
+    fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec<u8> {
+        match key_prefix {
+            KeyPrefixes::PublicKey => b"pubkey".to_vec(),
+        }
+    }
+
+    fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec<u8> {
+        let mut buf = BytesMut::with_capacity(6 + 8);
+        buf.put(self.prefix_bytes(key_prefix));
+        buf.put_u64_be(index as u64);
+        buf.take().to_vec()
+    }
+
+    pub fn put_public_key_by_index(
+        &self,
+        index: usize,
+        public_key: &PublicKey,
+    ) -> Result<(), ValidatorStoreError> {
+        let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
+        let val = ssz_encode(public_key);
+        self.db
+            .put(DB_COLUMN, &key[..], &val[..])
+            .map_err(ValidatorStoreError::from)
+    }
+
+    pub fn get_public_key_by_index(
+        &self,
+        index: usize,
+    ) -> Result<Option<PublicKey>, ValidatorStoreError> {
+        let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
+        let val = self.db.get(DB_COLUMN, &key[..])?;
+        match val {
+            None => Ok(None),
+            Some(val) => match PublicKey::ssz_decode(&val, 0) {
+                Ok((key, _)) => Ok(Some(key)),
+                Err(_) => Err(ValidatorStoreError::DecodeError),
+            },
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::super::MemoryDB;
+    use super::*;
+    use bls::Keypair;
+
+    #[test]
+    fn test_prefix_bytes() {
+        let db = Arc::new(MemoryDB::open());
+        let store = ValidatorStore::new(db.clone());
+
+        assert_eq!(
+            store.prefix_bytes(&KeyPrefixes::PublicKey),
+            b"pubkey".to_vec()
+        );
+ } + + #[test] + fn test_get_db_key_for_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let mut buf = BytesMut::with_capacity(6 + 8); + buf.put(b"pubkey".to_vec()); + buf.put_u64_be(42); + assert_eq!( + store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42), + buf.take().to_vec() + ) + } + + #[test] + fn test_put_public_key_by_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let index = 3; + let public_key = Keypair::random().pk; + + store.put_public_key_by_index(index, &public_key).unwrap(); + let public_key_at_index = db + .get( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], + ) + .unwrap() + .unwrap(); + + assert_eq!(public_key_at_index, ssz_encode(&public_key)); + } + + #[test] + fn test_get_public_key_by_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let index = 4; + let public_key = Keypair::random().pk; + + db.put( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], + &ssz_encode(&public_key)[..], + ) + .unwrap(); + + let public_key_at_index = store.get_public_key_by_index(index).unwrap().unwrap(); + assert_eq!(public_key_at_index, public_key); + } + + #[test] + fn test_get_public_key_by_unknown_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let public_key = Keypair::random().pk; + + db.put( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, 3)[..], + &ssz_encode(&public_key)[..], + ) + .unwrap(); + + let public_key_at_index = store.get_public_key_by_index(4).unwrap(); + assert_eq!(public_key_at_index, None); + } + + #[test] + fn test_get_invalid_public_key() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); + db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); + + assert_eq!( + store.get_public_key_by_index(42), + Err(ValidatorStoreError::DecodeError) + ); + } + + #[test] + fn test_validator_store_put_get() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db); + + let keys = vec![ + Keypair::random(), + Keypair::random(), + Keypair::random(), + Keypair::random(), + Keypair::random(), + ]; + + for i in 0..keys.len() { + store.put_public_key_by_index(i, &keys[i].pk).unwrap(); + } + + /* + * Check all keys are retrieved correctly. + */ + for i in 0..keys.len() { + let retrieved = store.get_public_key_by_index(i).unwrap().unwrap(); + assert_eq!(retrieved, keys[i].pk); + } + + /* + * Check that an index that wasn't stored returns None. + */ + assert!(store + .get_public_key_by_index(keys.len() + 1) + .unwrap() + .is_none()); + } +} diff --git a/beacon_node/db/src/traits.rs b/beacon_node/db/src/traits.rs new file mode 100644 index 000000000..41be3e23d --- /dev/null +++ b/beacon_node/db/src/traits.rs @@ -0,0 +1,28 @@ +pub type DBValue = Vec; + +#[derive(Debug)] +pub struct DBError { + pub message: String, +} + +impl DBError { + pub fn new(message: String) -> Self { + Self { message } + } +} + +/// A generic database to be used by the "client' (i.e., +/// the lighthouse blockchain client). +/// +/// The purpose of having this generic trait is to allow the +/// program to use a persistent on-disk database during production, +/// but use a transient database during tests. 
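+///
+/// A hedged sketch of how generic client code uses the trait (the column name is
+/// illustrative; not compiled as a doctest):
+///
+/// ```ignore
+/// fn store_and_check<T: ClientDB>(db: &T) -> Result<bool, DBError> {
+///     db.put("blocks", b"key", b"val")?;
+///     db.exists("blocks", b"key")
+/// }
+/// ```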
+pub trait ClientDB: Sync + Send {
+    fn get(&self, col: &str, key: &[u8]) -> Result<Option<DBValue>, DBError>;
+
+    fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError>;
+
+    fn exists(&self, col: &str, key: &[u8]) -> Result<bool, DBError>;
+
+    fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError>;
+}
diff --git a/beacon_node/src/config/mod.rs b/beacon_node/src/config/mod.rs
new file mode 100644
index 000000000..5c94e300c
--- /dev/null
+++ b/beacon_node/src/config/mod.rs
@@ -0,0 +1,30 @@
+use std::fs;
+use std::path::PathBuf;
+
+/// Stores the core configuration for this Lighthouse instance.
+/// This struct is general; other components may implement more
+/// specialized config structs.
+#[derive(Clone)]
+pub struct LighthouseConfig {
+    pub data_dir: PathBuf,
+    pub p2p_listen_port: u16,
+}
+
+const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse";
+
+impl LighthouseConfig {
+    /// Build a new lighthouse configuration from defaults.
+    pub fn default() -> Self {
+        let data_dir = {
+            let home = dirs::home_dir().expect("Unable to determine home dir.");
+            home.join(DEFAULT_LIGHTHOUSE_DIR)
+        };
+        fs::create_dir_all(&data_dir)
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
+        let p2p_listen_port = 0;
+        Self {
+            data_dir,
+            p2p_listen_port,
+        }
+    }
+}
diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs
new file mode 100644
index 000000000..2b6cdddcd
--- /dev/null
+++ b/beacon_node/src/main.rs
@@ -0,0 +1,134 @@
+extern crate slog;
+
+mod config;
+mod rpc;
+
+use std::path::PathBuf;
+
+use crate::config::LighthouseConfig;
+use crate::rpc::start_server;
+use beacon_chain::BeaconChain;
+use bls::create_proof_of_possession;
+use clap::{App, Arg};
+use db::{
+    stores::{BeaconBlockStore, BeaconStateStore},
+    MemoryDB,
+};
+use fork_choice::optimised_lmd_ghost::OptimisedLMDGhost;
+use slog::{error, info, o, Drain};
+use slot_clock::SystemTimeSlotClock;
+use std::sync::Arc;
+use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair};
+
+fn main() {
+    let decorator = slog_term::TermDecorator::new().build();
+    let drain = slog_term::CompactFormat::new(decorator).build().fuse();
+    let drain = slog_async::Async::new(drain).build().fuse();
+    let log = slog::Logger::root(drain, o!());
+
+    let matches = App::new("Lighthouse")
+        .version("0.0.1")
+        .author("Sigma Prime ")
+        .about("Eth 2.0 Client")
+        .arg(
+            Arg::with_name("datadir")
+                .long("datadir")
+                .value_name("DIR")
+                .help("Data directory for keys and databases.")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("port")
+                .long("port")
+                .value_name("PORT")
+                .help("Network listen port for p2p connections.")
+                .takes_value(true),
+        )
+        .get_matches();
+
+    let mut config = LighthouseConfig::default();
+
+    // Custom datadir
+    if let Some(dir) = matches.value_of("datadir") {
+        config.data_dir = PathBuf::from(dir.to_string());
+    }
+
+    // Custom p2p listen port
+    if let Some(port_str) = matches.value_of("port") {
+        if let Ok(port) = port_str.parse::<u16>() {
+            config.p2p_listen_port = port;
+        } else {
+            error!(log, "Invalid port"; "port" => port_str);
+            return;
+        }
+    }
+
+    // Log configuration
+    info!(log, "";
+          "data_dir" => &config.data_dir.to_str(),
+          "port" => &config.p2p_listen_port);
+
+    // Specification (presently fixed to foundation).
+    let spec = ChainSpec::foundation();
+
+    // Database (presently in-memory)
+    let db = Arc::new(MemoryDB::open());
+    let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
+    let state_store = Arc::new(BeaconStateStore::new(db.clone()));
+
+    // Slot clock
+    let genesis_time = 1_549_935_547; // 12th Feb 2019 (arbitrary value in the past).
+    let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration)
+        .expect("Unable to load SystemTimeSlotClock");
+    // Choose the fork choice
+    let fork_choice = OptimisedLMDGhost::new(block_store.clone(), state_store.clone());
+
+    /*
+     * Generate some random data to start a chain with.
+     *
+     * This will need to be replaced for production usage.
+     */
+    let latest_eth1_data = Eth1Data {
+        deposit_root: Hash256::zero(),
+        block_hash: Hash256::zero(),
+    };
+    let keypairs: Vec<Keypair> = (0..10).map(|_| Keypair::random()).collect();
+    let initial_validator_deposits = keypairs
+        .iter()
+        .map(|keypair| Deposit {
+            branch: vec![], // branch verification is not specified.
+            index: 0,       // index verification is not specified.
+            deposit_data: DepositData {
+                amount: 32_000_000_000, // 32 ETH (in Gwei)
+                timestamp: genesis_time - 1,
+                deposit_input: DepositInput {
+                    pubkey: keypair.pk.clone(),
+                    withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
+                    proof_of_possession: create_proof_of_possession(&keypair),
+                },
+            },
+        })
+        .collect();
+
+    // Genesis chain
+    let _chain_result = BeaconChain::genesis(
+        state_store.clone(),
+        block_store.clone(),
+        slot_clock,
+        genesis_time,
+        latest_eth1_data,
+        initial_validator_deposits,
+        spec,
+        fork_choice,
+    );
+
+    let _server = start_server(log.clone());
+
+    loop {
+        std::thread::sleep(std::time::Duration::from_secs(1));
+    }
+}
diff --git a/beacon_node/src/rpc/beacon_block.rs b/beacon_node/src/rpc/beacon_block.rs
new file mode 100644
index 000000000..96f64e0dd
--- /dev/null
+++ b/beacon_node/src/rpc/beacon_block.rs
@@ -0,0 +1,57 @@
+use futures::Future;
+use grpcio::{RpcContext, UnarySink};
+use protos::services::{
+    BeaconBlock as BeaconBlockProto, ProduceBeaconBlockRequest, ProduceBeaconBlockResponse,
+    PublishBeaconBlockRequest, PublishBeaconBlockResponse,
+};
+use protos::services_grpc::BeaconBlockService;
+use slog::Logger;
+
+#[derive(Clone)]
+pub struct BeaconBlockServiceInstance {
+    pub log: Logger,
+}
+
+impl BeaconBlockService for BeaconBlockServiceInstance {
+    /// Produce a `BeaconBlock` for signing by a validator.
+    fn produce_beacon_block(
+        &mut self,
+        ctx: RpcContext,
+        req: ProduceBeaconBlockRequest,
+        sink: UnarySink<ProduceBeaconBlockResponse>,
+    ) {
+        println!("producing at slot {}", req.get_slot());
+
+        // TODO: build a legit block.
+        let mut block = BeaconBlockProto::new();
+        block.set_slot(req.get_slot());
+        block.set_block_root(b"cats".to_vec());
+
+        let mut resp = ProduceBeaconBlockResponse::new();
+        resp.set_block(block);
+
+        let f = sink
+            .success(resp)
+            .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
+        ctx.spawn(f)
+    }
+
+    /// Accept some fully-formed `BeaconBlock`, process and publish it.
+    fn publish_beacon_block(
+        &mut self,
+        ctx: RpcContext,
+        req: PublishBeaconBlockRequest,
+        sink: UnarySink<PublishBeaconBlockResponse>,
+    ) {
+        println!("publishing {:?}", req.get_block());
+
+        // TODO: actually process the block.
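+        // A hedged sketch of the eventual wiring (hypothetical; it assumes a shared
+        // `BeaconChain` handle and a hypothetical `decode_block` helper, mirroring how
+        // the test harness calls `BeaconChain::process_block`):
+        //
+        //     let block = decode_block(req.get_block())?; // hypothetical helper
+        //     beacon_chain.process_block(block)?;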
+ let mut resp = PublishBeaconBlockResponse::new(); + resp.set_success(true); + + let f = sink + .success(resp) + .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); + ctx.spawn(f) + } +} diff --git a/beacon_node/src/rpc/mod.rs b/beacon_node/src/rpc/mod.rs new file mode 100644 index 000000000..6a18a4aa8 --- /dev/null +++ b/beacon_node/src/rpc/mod.rs @@ -0,0 +1,36 @@ +mod beacon_block; +mod validator; + +use self::beacon_block::BeaconBlockServiceInstance; +use self::validator::ValidatorServiceInstance; +use grpcio::{Environment, Server, ServerBuilder}; +use protos::services_grpc::{create_beacon_block_service, create_validator_service}; +use std::sync::Arc; + +use slog::{info, Logger}; + +pub fn start_server(log: Logger) -> Server { + let log_clone = log.clone(); + let env = Arc::new(Environment::new(1)); + + let beacon_block_service = { + let instance = BeaconBlockServiceInstance { log: log.clone() }; + create_beacon_block_service(instance) + }; + let validator_service = { + let instance = ValidatorServiceInstance { log: log.clone() }; + create_validator_service(instance) + }; + + let mut server = ServerBuilder::new(env) + .register_service(beacon_block_service) + .register_service(validator_service) + .bind("127.0.0.1", 50_051) + .build() + .unwrap(); + server.start(); + for &(ref host, port) in server.bind_addrs() { + info!(log_clone, "gRPC listening on {}:{}", host, port); + } + server +} diff --git a/beacon_node/src/rpc/validator.rs b/beacon_node/src/rpc/validator.rs new file mode 100644 index 000000000..f894deca6 --- /dev/null +++ b/beacon_node/src/rpc/validator.rs @@ -0,0 +1,64 @@ +use bls::PublicKey; +use futures::Future; +use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; +use protos::services::{ + IndexResponse, ProposeBlockSlotRequest, ProposeBlockSlotResponse, PublicKey as PublicKeyRequest, +}; +use protos::services_grpc::ValidatorService; +use slog::{debug, Logger}; +use ssz::Decodable; + +#[derive(Clone)] +pub struct ValidatorServiceInstance { + pub log: Logger, +} + +impl ValidatorService for ValidatorServiceInstance { + fn validator_index( + &mut self, + ctx: RpcContext, + req: PublicKeyRequest, + sink: UnarySink, + ) { + if let Ok((public_key, _)) = PublicKey::ssz_decode(req.get_public_key(), 0) { + debug!(self.log, "RPC request"; "endpoint" => "ValidatorIndex", "public_key" => public_key.concatenated_hex_id()); + + let mut resp = IndexResponse::new(); + + // TODO: return a legit value. + resp.set_index(1); + + let f = sink + .success(resp) + .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); + ctx.spawn(f) + } else { + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::InvalidArgument, + Some("Invalid public_key".to_string()), + )) + .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); + ctx.spawn(f) + } + } + + fn propose_block_slot( + &mut self, + ctx: RpcContext, + req: ProposeBlockSlotRequest, + sink: UnarySink, + ) { + debug!(self.log, "RPC request"; "endpoint" => "ProposeBlockSlot", "epoch" => req.get_epoch(), "validator_index" => req.get_validator_index()); + + let mut resp = ProposeBlockSlotResponse::new(); + + // TODO: return a legit value. 
+ resp.set_slot(1); + + let f = sink + .success(resp) + .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e)); + ctx.spawn(f) + } +} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 484f6a518..000000000 --- a/docs/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Lighthouse Documentation - -Table of Contents: - - -- [ONBOARDING.md](ONBOARDING.md): General on-boarding info, - including style-guide. -- [LIGHTHOUSE.md](LIGHTHOUSE.md): Project goals and ethos. -- [RUNNING.md](RUNNING.md): Step-by-step on getting the code running. -- [SERENITY.md](SERENITY.md): Introduction to Ethereum Serenity. diff --git a/docs/RUNNING.md b/docs/RUNNING.md deleted file mode 100644 index 4c9d411b3..000000000 --- a/docs/RUNNING.md +++ /dev/null @@ -1,50 +0,0 @@ -# Running Lighthouse Code - -These documents provide a guide for running code in the following repositories: - -- [lighthouse-libs](https://github.com/sigp/lighthouse-libs) -- [lighthouse-beacon](https://github.com/sigp/lighthouse-beacon) -- [lighthouse-validator](https://github.com/sigp/lighthouse-validator) - -This code-base is still very much under-development and does not provide any -user-facing functionality. For developers and researchers, there are several -tests and benchmarks which may be of interest. - -A few basic steps are needed to get set up: - - 1. Install [rustup](https://rustup.rs/). It's a toolchain manager for Rust - (Linux | macos | Windows). For installation run the below command in your - terminal `$ curl https://sh.rustup.rs -sSf | sh` - 2. (Linux & MacOS) To configure your current shell run: `$ source - $HOME/.cargo/env` - 3. Use the command `rustup show` to get information about the Rust - installation. You should see that the active toolchain is the stable - version. - 4. Run `rustc --version` to check the installation and version of rust. - - Updates can be performed using` rustup update` . - 5. Install build dependencies (Arch packages are listed here, your - distribution will likely be similar): - - `clang`: required by RocksDB. `protobuf`: required for protobuf - - serialization (gRPC). - 6. Navigate to the working directory. - 7. Run the test by using command `cargo test --all`. By running, it will - pass all the required test cases. If you are doing it for the first time, - then you can grab a coffee in the meantime. Usually, it takes time to build, - compile and pass all test cases. If there is no error then it means - everything is working properly and it's time to get your hands dirty. In - case, if there is an error, then please raise the - [issue](https://github.com/sigp/lighthouse/issues). We will help you. - 8. As an alternative to, or instead of the above step, you may also run - benchmarks by using the command `cargo bench --all`. (Note: not all - repositories have benchmarking). - -##### Note: Lighthouse presently runs on Rust `stable`. - -##### Note for Windows users: Perl may also be required to build lighthouse. -You can install [Strawberry Perl](http://strawberryperl.com/), or alternatively -use a choco install command `choco install strawberryperl`. - -Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues -compiling in Windows. You can specify a known working version by editing -version in protos/Cargo.toml's "build-dependencies" section to `protoc-grpcio = -"<=0.3.0"`. 
diff --git a/docs/LIGHTHOUSE.md b/docs/lighthouse.md similarity index 100% rename from docs/LIGHTHOUSE.md rename to docs/lighthouse.md diff --git a/docs/ONBOARDING.md b/docs/onboarding.md similarity index 100% rename from docs/ONBOARDING.md rename to docs/onboarding.md diff --git a/docs/SERENITY.md b/docs/serenity.md similarity index 100% rename from docs/SERENITY.md rename to docs/serenity.md diff --git a/eth2/attester/Cargo.toml b/eth2/attester/Cargo.toml new file mode 100644 index 000000000..956ecf565 --- /dev/null +++ b/eth2/attester/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "attester" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +slot_clock = { path = "../../eth2/utils/slot_clock" } +ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../../eth2/types" } diff --git a/eth2/attester/src/lib.rs b/eth2/attester/src/lib.rs new file mode 100644 index 000000000..7352dd2ea --- /dev/null +++ b/eth2/attester/src/lib.rs @@ -0,0 +1,250 @@ +pub mod test_utils; +mod traits; + +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{AttestationData, FreeAttestation, Signature, Slot}; + +pub use self::traits::{ + BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, +}; + +const PHASE_0_CUSTODY_BIT: bool = false; + +#[derive(Debug, PartialEq)] +pub enum PollOutcome { + AttestationProduced(Slot), + AttestationNotRequired(Slot), + SlashableAttestationNotProduced(Slot), + BeaconNodeUnableToProduceAttestation(Slot), + ProducerDutiesUnknown(Slot), + SlotAlreadyProcessed(Slot), + SignerRejection(Slot), + ValidatorIsUnknown(Slot), +} + +#[derive(Debug, PartialEq)] +pub enum Error { + SlotClockError, + SlotUnknowable, + EpochMapPoisoned, + SlotClockPoisoned, + EpochLengthIsZero, + BeaconNodeError(BeaconNodeError), +} + +/// A polling state machine which performs block production duties, based upon some epoch duties +/// (`EpochDutiesMap`) and a concept of time (`SlotClock`). +/// +/// Ensures that messages are not slashable. +/// +/// Relies upon an external service to keep the `EpochDutiesMap` updated. +pub struct Attester { + pub last_processed_slot: Option, + duties: Arc, + slot_clock: Arc, + beacon_node: Arc, + signer: Arc, +} + +impl Attester { + /// Returns a new instance where `last_processed_slot == 0`. + pub fn new(duties: Arc, slot_clock: Arc, beacon_node: Arc, signer: Arc) -> Self { + Self { + last_processed_slot: None, + duties, + slot_clock, + beacon_node, + signer, + } + } +} + +impl Attester { + /// Poll the `BeaconNode` and produce an attestation if required. + pub fn poll(&mut self) -> Result { + let slot = self + .slot_clock + .present_slot() + .map_err(|_| Error::SlotClockError)? 
+ .ok_or(Error::SlotUnknowable)?; + + if !self.is_processed_slot(slot) { + self.last_processed_slot = Some(slot); + + let shard = match self.duties.attestation_shard(slot) { + Ok(Some(result)) => result, + Ok(None) => return Ok(PollOutcome::AttestationNotRequired(slot)), + Err(DutiesReaderError::UnknownEpoch) => { + return Ok(PollOutcome::ProducerDutiesUnknown(slot)); + } + Err(DutiesReaderError::UnknownValidator) => { + return Ok(PollOutcome::ValidatorIsUnknown(slot)); + } + Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero), + Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned), + }; + + self.produce_attestation(slot, shard) + } else { + Ok(PollOutcome::SlotAlreadyProcessed(slot)) + } + } + + fn produce_attestation(&mut self, slot: Slot, shard: u64) -> Result { + let attestation_data = match self.beacon_node.produce_attestation_data(slot, shard)? { + Some(attestation_data) => attestation_data, + None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)), + }; + + if !self.safe_to_produce(&attestation_data) { + return Ok(PollOutcome::SlashableAttestationNotProduced(slot)); + } + + let signature = match self.sign_attestation_data(&attestation_data) { + Some(signature) => signature, + None => return Ok(PollOutcome::SignerRejection(slot)), + }; + + let validator_index = match self.duties.validator_index() { + Some(validator_index) => validator_index, + None => return Ok(PollOutcome::ValidatorIsUnknown(slot)), + }; + + let free_attestation = FreeAttestation { + data: attestation_data, + signature, + validator_index, + }; + + self.beacon_node + .publish_attestation_data(free_attestation)?; + Ok(PollOutcome::AttestationProduced(slot)) + } + + fn is_processed_slot(&self, slot: Slot) -> bool { + match self.last_processed_slot { + Some(processed_slot) if slot <= processed_slot => true, + _ => false, + } + } + + /// Consumes a block, returning that block signed by the validators private key. + /// + /// Important: this function will not check to ensure the block is not slashable. This must be + /// done upstream. + fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option { + self.store_produce(attestation_data); + + self.signer + .sign_attestation_message(&attestation_data.signable_message(PHASE_0_CUSTODY_BIT)[..]) + } + + /// Returns `true` if signing some attestation_data is safe (non-slashable). + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn safe_to_produce(&self, _attestation_data: &AttestationData) -> bool { + // TODO: ensure the producer doesn't produce slashable blocks. + // https://github.com/sigp/lighthouse/issues/160 + true + } + + /// Record that a block was produced so that slashable votes may not be made in the future. + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn store_produce(&mut self, _block: &AttestationData) { + // TODO: record this block production to prevent future slashings. + // https://github.com/sigp/lighthouse/issues/160 + } +} + +impl From for Error { + fn from(e: BeaconNodeError) -> Error { + Error::BeaconNodeError(e) + } +} + +#[cfg(test)] +mod tests { + use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode}; + use super::*; + use slot_clock::TestingSlotClock; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + ChainSpec, Keypair, + }; + + // TODO: implement more thorough testing. 
+ // https://github.com/sigp/lighthouse/issues/160 + // + // These tests should serve as a good example for future tests. + + #[test] + pub fn polling() { + let mut rng = XorShiftRng::from_seed([42; 16]); + + let spec = Arc::new(ChainSpec::foundation()); + let slot_clock = Arc::new(TestingSlotClock::new(0)); + let beacon_node = Arc::new(SimulatedBeaconNode::default()); + let signer = Arc::new(LocalSigner::new(Keypair::random())); + + let mut duties = EpochMap::new(spec.epoch_length); + let attest_slot = Slot::new(100); + let attest_epoch = attest_slot / spec.epoch_length; + let attest_shard = 12; + duties.insert_attestation_shard(attest_slot, attest_shard); + duties.set_validator_index(Some(2)); + let duties = Arc::new(duties); + + let mut attester = Attester::new( + duties.clone(), + slot_clock.clone(), + beacon_node.clone(), + signer.clone(), + ); + + // Configure responses from the BeaconNode. + beacon_node.set_next_produce_result(Ok(Some(AttestationData::random_for_test(&mut rng)))); + beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidAttestation)); + + // One slot before attestation slot... + slot_clock.set_slot(attest_slot.as_u64() - 1); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationNotRequired(attest_slot - 1)) + ); + + // On the attest slot... + slot_clock.set_slot(attest_slot.as_u64()); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationProduced(attest_slot)) + ); + + // Trying the same attest slot again... + slot_clock.set_slot(attest_slot.as_u64()); + assert_eq!( + attester.poll(), + Ok(PollOutcome::SlotAlreadyProcessed(attest_slot)) + ); + + // One slot after the attest slot... + slot_clock.set_slot(attest_slot.as_u64() + 1); + assert_eq!( + attester.poll(), + Ok(PollOutcome::AttestationNotRequired(attest_slot + 1)) + ); + + // In an epoch without known duties... 
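+ // (the `EpochMap` above only contains duties for the epoch of `attest_slot`,
+ // so a slot in the following epoch should report `ProducerDutiesUnknown`)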
+ let slot = (attest_epoch + 1) * spec.epoch_length; + slot_clock.set_slot(slot.into()); + assert_eq!( + attester.poll(), + Ok(PollOutcome::ProducerDutiesUnknown(slot)) + ); + } +} diff --git a/eth2/attester/src/test_utils/epoch_map.rs b/eth2/attester/src/test_utils/epoch_map.rs new file mode 100644 index 000000000..f0dc4312e --- /dev/null +++ b/eth2/attester/src/test_utils/epoch_map.rs @@ -0,0 +1,44 @@ +use crate::{DutiesReader, DutiesReaderError}; +use std::collections::HashMap; +use types::{Epoch, Slot}; + +pub struct EpochMap { + epoch_length: u64, + validator_index: Option, + map: HashMap, +} + +impl EpochMap { + pub fn new(epoch_length: u64) -> Self { + Self { + epoch_length, + validator_index: None, + map: HashMap::new(), + } + } + + pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) { + let epoch = slot.epoch(self.epoch_length); + self.map.insert(epoch, (slot, shard)); + } + + pub fn set_validator_index(&mut self, index: Option) { + self.validator_index = index; + } +} + +impl DutiesReader for EpochMap { + fn attestation_shard(&self, slot: Slot) -> Result, DutiesReaderError> { + let epoch = slot.epoch(self.epoch_length); + + match self.map.get(&epoch) { + Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)), + Some((attest_slot, _attest_shard)) if *attest_slot != slot => Ok(None), + _ => Err(DutiesReaderError::UnknownEpoch), + } + } + + fn validator_index(&self) -> Option { + self.validator_index + } +} diff --git a/eth2/attester/src/test_utils/local_signer.rs b/eth2/attester/src/test_utils/local_signer.rs new file mode 100644 index 000000000..c256d1050 --- /dev/null +++ b/eth2/attester/src/test_utils/local_signer.rs @@ -0,0 +1,31 @@ +use crate::traits::Signer; +use std::sync::RwLock; +use types::{Keypair, Signature}; + +/// A test-only struct used to simulate a Beacon Node. +pub struct LocalSigner { + keypair: Keypair, + should_sign: RwLock, +} + +impl LocalSigner { + /// Produce a new LocalSigner with signing enabled by default. + pub fn new(keypair: Keypair) -> Self { + Self { + keypair, + should_sign: RwLock::new(true), + } + } + + /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages + /// will be signed. + pub fn enable_signing(&self, enabled: bool) { + *self.should_sign.write().unwrap() = enabled; + } +} + +impl Signer for LocalSigner { + fn sign_attestation_message(&self, message: &[u8]) -> Option { + Some(Signature::new(message, &self.keypair.sk)) + } +} diff --git a/eth2/attester/src/test_utils/mod.rs b/eth2/attester/src/test_utils/mod.rs new file mode 100644 index 000000000..481247dd0 --- /dev/null +++ b/eth2/attester/src/test_utils/mod.rs @@ -0,0 +1,7 @@ +mod epoch_map; +mod local_signer; +mod simulated_beacon_node; + +pub use self::epoch_map::EpochMap; +pub use self::local_signer::LocalSigner; +pub use self::simulated_beacon_node::SimulatedBeaconNode; diff --git a/eth2/attester/src/test_utils/simulated_beacon_node.rs b/eth2/attester/src/test_utils/simulated_beacon_node.rs new file mode 100644 index 000000000..bab48a975 --- /dev/null +++ b/eth2/attester/src/test_utils/simulated_beacon_node.rs @@ -0,0 +1,44 @@ +use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome}; +use std::sync::RwLock; +use types::{AttestationData, FreeAttestation, Slot}; + +type ProduceResult = Result, BeaconNodeError>; +type PublishResult = Result; + +/// A test-only struct used to simulate a Beacon Node. 
+#[derive(Default)]
+pub struct SimulatedBeaconNode {
+ pub produce_input: RwLock<Option<(Slot, u64)>>,
+ pub produce_result: RwLock<Option<ProduceResult>>,
+
+ pub publish_input: RwLock<Option<FreeAttestation>>,
+ pub publish_result: RwLock<Option<PublishResult>>,
+}
+
+impl SimulatedBeaconNode {
+ pub fn set_next_produce_result(&self, result: ProduceResult) {
+ *self.produce_result.write().unwrap() = Some(result);
+ }
+
+ pub fn set_next_publish_result(&self, result: PublishResult) {
+ *self.publish_result.write().unwrap() = Some(result);
+ }
+}
+
+impl BeaconNode for SimulatedBeaconNode {
+ fn produce_attestation_data(&self, slot: Slot, shard: u64) -> ProduceResult {
+ *self.produce_input.write().unwrap() = Some((slot, shard));
+ match *self.produce_result.read().unwrap() {
+ Some(ref r) => r.clone(),
+ None => panic!("SimulatedBeaconNode: produce_result == None"),
+ }
+ }
+
+ fn publish_attestation_data(&self, free_attestation: FreeAttestation) -> PublishResult {
+ *self.publish_input.write().unwrap() = Some(free_attestation.clone());
+ match *self.publish_result.read().unwrap() {
+ Some(ref r) => r.clone(),
+ None => panic!("SimulatedBeaconNode: publish_result == None"),
+ }
+ }
+}
diff --git a/eth2/attester/src/traits.rs b/eth2/attester/src/traits.rs
new file mode 100644
index 000000000..53bce3aaa
--- /dev/null
+++ b/eth2/attester/src/traits.rs
@@ -0,0 +1,49 @@
+use types::{AttestationData, FreeAttestation, Signature, Slot};
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum BeaconNodeError {
+ RemoteFailure(String),
+ DecodeFailure,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum PublishOutcome {
+ ValidAttestation,
+ InvalidAttestation(String),
+}
+
+/// Defines the methods required to produce and publish attestations on a Beacon Node.
+pub trait BeaconNode: Send + Sync {
+ fn produce_attestation_data(
+ &self,
+ slot: Slot,
+ shard: u64,
+ ) -> Result<Option<AttestationData>, BeaconNodeError>;
+
+ fn publish_attestation_data(
+ &self,
+ free_attestation: FreeAttestation,
+ ) -> Result<PublishOutcome, BeaconNodeError>;
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum DutiesReaderError {
+ UnknownValidator,
+ UnknownEpoch,
+ EpochLengthIsZero,
+ Poisoned,
+}
+
+/// Informs a validator of their duties (e.g., attestation production).
+pub trait DutiesReader: Send + Sync {
+ /// Returns `Some(shard)` if this slot is an attestation slot. Otherwise, returns `None`.
+ fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError>;
+
+ /// Returns the validator's index, if the validator is known.
+ fn validator_index(&self) -> Option<u64>;
+}
+
+/// Signs messages using an internally-maintained private key.
+pub trait Signer { + fn sign_attestation_message(&self, message: &[u8]) -> Option; +} diff --git a/eth2/block_producer/Cargo.toml b/eth2/block_producer/Cargo.toml new file mode 100644 index 000000000..86dde92f7 --- /dev/null +++ b/eth2/block_producer/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "block_producer" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +slot_clock = { path = "../../eth2/utils/slot_clock" } +ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../../eth2/types" } diff --git a/eth2/block_producer/src/lib.rs b/eth2/block_producer/src/lib.rs new file mode 100644 index 000000000..f6a0fd6df --- /dev/null +++ b/eth2/block_producer/src/lib.rs @@ -0,0 +1,287 @@ +pub mod test_utils; +mod traits; + +use slot_clock::SlotClock; +use ssz::ssz_encode; +use std::sync::Arc; +use types::{BeaconBlock, ChainSpec, Slot}; + +pub use self::traits::{ + BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, +}; + +#[derive(Debug, PartialEq)] +pub enum PollOutcome { + /// A new block was produced. + BlockProduced(Slot), + /// A block was not produced as it would have been slashable. + SlashableBlockNotProduced(Slot), + /// The validator duties did not require a block to be produced. + BlockProductionNotRequired(Slot), + /// The duties for the present epoch were not found. + ProducerDutiesUnknown(Slot), + /// The slot has already been processed, execution was skipped. + SlotAlreadyProcessed(Slot), + /// The Beacon Node was unable to produce a block at that slot. + BeaconNodeUnableToProduceBlock(Slot), + /// The signer failed to sign the message. + SignerRejection(Slot), + /// The public key for this validator is not an active validator. + ValidatorIsUnknown(Slot), +} + +#[derive(Debug, PartialEq)] +pub enum Error { + SlotClockError, + SlotUnknowable, + EpochMapPoisoned, + SlotClockPoisoned, + EpochLengthIsZero, + BeaconNodeError(BeaconNodeError), +} + +/// A polling state machine which performs block production duties, based upon some epoch duties +/// (`EpochDutiesMap`) and a concept of time (`SlotClock`). +/// +/// Ensures that messages are not slashable. +/// +/// Relies upon an external service to keep the `EpochDutiesMap` updated. +pub struct BlockProducer { + pub last_processed_slot: Option, + spec: Arc, + epoch_map: Arc, + slot_clock: Arc, + beacon_node: Arc, + signer: Arc, +} + +impl BlockProducer { + /// Returns a new instance where `last_processed_slot == 0`. + pub fn new( + spec: Arc, + epoch_map: Arc, + slot_clock: Arc, + beacon_node: Arc, + signer: Arc, + ) -> Self { + Self { + last_processed_slot: None, + spec, + epoch_map, + slot_clock, + beacon_node, + signer, + } + } +} + +impl BlockProducer { + /// "Poll" to see if the validator is required to take any action. + /// + /// The slot clock will be read and any new actions undertaken. + pub fn poll(&mut self) -> Result { + let slot = self + .slot_clock + .present_slot() + .map_err(|_| Error::SlotClockError)? + .ok_or(Error::SlotUnknowable)?; + + // If this is a new slot. 
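+ // Duties are consulted before any signing occurs; a slot is only recorded
+ // in `last_processed_slot` once it is a production slot, so the same block
+ // cannot be signed twice.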
+ if !self.is_processed_slot(slot) { + let is_block_production_slot = match self.epoch_map.is_block_production_slot(slot) { + Ok(result) => result, + Err(DutiesReaderError::UnknownEpoch) => { + return Ok(PollOutcome::ProducerDutiesUnknown(slot)); + } + Err(DutiesReaderError::UnknownValidator) => { + return Ok(PollOutcome::ValidatorIsUnknown(slot)); + } + Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero), + Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned), + }; + + if is_block_production_slot { + self.last_processed_slot = Some(slot); + + self.produce_block(slot) + } else { + Ok(PollOutcome::BlockProductionNotRequired(slot)) + } + } else { + Ok(PollOutcome::SlotAlreadyProcessed(slot)) + } + } + + fn is_processed_slot(&self, slot: Slot) -> bool { + match self.last_processed_slot { + Some(processed_slot) if processed_slot >= slot => true, + _ => false, + } + } + + /// Produce a block at some slot. + /// + /// Assumes that a block is required at this slot (does not check the duties). + /// + /// Ensures the message is not slashable. + /// + /// !!! UNSAFE !!! + /// + /// The slash-protection code is not yet implemented. There is zero protection against + /// slashing. + fn produce_block(&mut self, slot: Slot) -> Result { + let randao_reveal = { + // TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`. + let message = ssz_encode(&slot.epoch(self.spec.epoch_length)); + + match self.signer.sign_randao_reveal(&message) { + None => return Ok(PollOutcome::SignerRejection(slot)), + Some(signature) => signature, + } + }; + + if let Some(block) = self + .beacon_node + .produce_beacon_block(slot, &randao_reveal)? + { + if self.safe_to_produce(&block) { + if let Some(block) = self.sign_block(block) { + self.beacon_node.publish_beacon_block(block)?; + Ok(PollOutcome::BlockProduced(slot)) + } else { + Ok(PollOutcome::SignerRejection(slot)) + } + } else { + Ok(PollOutcome::SlashableBlockNotProduced(slot)) + } + } else { + Ok(PollOutcome::BeaconNodeUnableToProduceBlock(slot)) + } + } + + /// Consumes a block, returning that block signed by the validators private key. + /// + /// Important: this function will not check to ensure the block is not slashable. This must be + /// done upstream. + fn sign_block(&mut self, mut block: BeaconBlock) -> Option { + self.store_produce(&block); + + match self + .signer + .sign_block_proposal(&block.proposal_root(&self.spec)[..]) + { + None => None, + Some(signature) => { + block.signature = signature; + Some(block) + } + } + } + + /// Returns `true` if signing a block is safe (non-slashable). + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn safe_to_produce(&self, _block: &BeaconBlock) -> bool { + // TODO: ensure the producer doesn't produce slashable blocks. + // https://github.com/sigp/lighthouse/issues/160 + true + } + + /// Record that a block was produced so that slashable votes may not be made in the future. + /// + /// !!! UNSAFE !!! + /// + /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. + fn store_produce(&mut self, _block: &BeaconBlock) { + // TODO: record this block production to prevent future slashings. 
+ // https://github.com/sigp/lighthouse/issues/160 + } +} + +impl From for Error { + fn from(e: BeaconNodeError) -> Error { + Error::BeaconNodeError(e) + } +} + +#[cfg(test)] +mod tests { + use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode}; + use super::*; + use slot_clock::TestingSlotClock; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + Keypair, + }; + + // TODO: implement more thorough testing. + // https://github.com/sigp/lighthouse/issues/160 + // + // These tests should serve as a good example for future tests. + + #[test] + pub fn polling() { + let mut rng = XorShiftRng::from_seed([42; 16]); + + let spec = Arc::new(ChainSpec::foundation()); + let slot_clock = Arc::new(TestingSlotClock::new(0)); + let beacon_node = Arc::new(SimulatedBeaconNode::default()); + let signer = Arc::new(LocalSigner::new(Keypair::random())); + + let mut epoch_map = EpochMap::new(spec.epoch_length); + let produce_slot = Slot::new(100); + let produce_epoch = produce_slot.epoch(spec.epoch_length); + epoch_map.map.insert(produce_epoch, produce_slot); + let epoch_map = Arc::new(epoch_map); + + let mut block_producer = BlockProducer::new( + spec.clone(), + epoch_map.clone(), + slot_clock.clone(), + beacon_node.clone(), + signer.clone(), + ); + + // Configure responses from the BeaconNode. + beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng)))); + beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock)); + + // One slot before production slot... + slot_clock.set_slot(produce_slot.as_u64() - 1); + assert_eq!( + block_producer.poll(), + Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1)) + ); + + // On the produce slot... + slot_clock.set_slot(produce_slot.as_u64()); + assert_eq!( + block_producer.poll(), + Ok(PollOutcome::BlockProduced(produce_slot.into())) + ); + + // Trying the same produce slot again... + slot_clock.set_slot(produce_slot.as_u64()); + assert_eq!( + block_producer.poll(), + Ok(PollOutcome::SlotAlreadyProcessed(produce_slot)) + ); + + // One slot after the produce slot... + slot_clock.set_slot(produce_slot.as_u64() + 1); + assert_eq!( + block_producer.poll(), + Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1)) + ); + + // In an epoch without known duties... 
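+ // (the `EpochMap` only has an entry for `produce_epoch`, so the next epoch
+ // should yield `ProducerDutiesUnknown`)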
+ let slot = (produce_epoch.as_u64() + 1) * spec.epoch_length; + slot_clock.set_slot(slot); + assert_eq!( + block_producer.poll(), + Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot))) + ); + } +} diff --git a/eth2/block_producer/src/test_utils/epoch_map.rs b/eth2/block_producer/src/test_utils/epoch_map.rs new file mode 100644 index 000000000..e9ed9b68a --- /dev/null +++ b/eth2/block_producer/src/test_utils/epoch_map.rs @@ -0,0 +1,28 @@ +use crate::{DutiesReader, DutiesReaderError}; +use std::collections::HashMap; +use types::{Epoch, Slot}; + +pub struct EpochMap { + epoch_length: u64, + pub map: HashMap, +} + +impl EpochMap { + pub fn new(epoch_length: u64) -> Self { + Self { + epoch_length, + map: HashMap::new(), + } + } +} + +impl DutiesReader for EpochMap { + fn is_block_production_slot(&self, slot: Slot) -> Result { + let epoch = slot.epoch(self.epoch_length); + match self.map.get(&epoch) { + Some(s) if *s == slot => Ok(true), + Some(s) if *s != slot => Ok(false), + _ => Err(DutiesReaderError::UnknownEpoch), + } + } +} diff --git a/eth2/block_producer/src/test_utils/local_signer.rs b/eth2/block_producer/src/test_utils/local_signer.rs new file mode 100644 index 000000000..0ebefa29d --- /dev/null +++ b/eth2/block_producer/src/test_utils/local_signer.rs @@ -0,0 +1,35 @@ +use crate::traits::Signer; +use std::sync::RwLock; +use types::{Keypair, Signature}; + +/// A test-only struct used to simulate a Beacon Node. +pub struct LocalSigner { + keypair: Keypair, + should_sign: RwLock, +} + +impl LocalSigner { + /// Produce a new LocalSigner with signing enabled by default. + pub fn new(keypair: Keypair) -> Self { + Self { + keypair, + should_sign: RwLock::new(true), + } + } + + /// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages + /// will be signed. + pub fn enable_signing(&self, enabled: bool) { + *self.should_sign.write().unwrap() = enabled; + } +} + +impl Signer for LocalSigner { + fn sign_block_proposal(&self, message: &[u8]) -> Option { + Some(Signature::new(message, &self.keypair.sk)) + } + + fn sign_randao_reveal(&self, message: &[u8]) -> Option { + Some(Signature::new(message, &self.keypair.sk)) + } +} diff --git a/eth2/block_producer/src/test_utils/mod.rs b/eth2/block_producer/src/test_utils/mod.rs new file mode 100644 index 000000000..481247dd0 --- /dev/null +++ b/eth2/block_producer/src/test_utils/mod.rs @@ -0,0 +1,7 @@ +mod epoch_map; +mod local_signer; +mod simulated_beacon_node; + +pub use self::epoch_map::EpochMap; +pub use self::local_signer::LocalSigner; +pub use self::simulated_beacon_node::SimulatedBeaconNode; diff --git a/eth2/block_producer/src/test_utils/simulated_beacon_node.rs b/eth2/block_producer/src/test_utils/simulated_beacon_node.rs new file mode 100644 index 000000000..c0a12c1ac --- /dev/null +++ b/eth2/block_producer/src/test_utils/simulated_beacon_node.rs @@ -0,0 +1,48 @@ +use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome}; +use std::sync::RwLock; +use types::{BeaconBlock, Signature, Slot}; + +type ProduceResult = Result, BeaconNodeError>; +type PublishResult = Result; + +/// A test-only struct used to simulate a Beacon Node. +#[derive(Default)] +pub struct SimulatedBeaconNode { + pub produce_input: RwLock>, + pub produce_result: RwLock>, + + pub publish_input: RwLock>, + pub publish_result: RwLock>, +} + +impl SimulatedBeaconNode { + /// Set the result to be returned when `produce_beacon_block` is called. 
+ pub fn set_next_produce_result(&self, result: ProduceResult) {
+ *self.produce_result.write().unwrap() = Some(result);
+ }
+
+ /// Set the result to be returned when `publish_beacon_block` is called.
+ pub fn set_next_publish_result(&self, result: PublishResult) {
+ *self.publish_result.write().unwrap() = Some(result);
+ }
+}
+
+impl BeaconNode for SimulatedBeaconNode {
+ /// Returns the value specified by `set_next_produce_result`.
+ fn produce_beacon_block(&self, slot: Slot, randao_reveal: &Signature) -> ProduceResult {
+ *self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone()));
+ match *self.produce_result.read().unwrap() {
+ Some(ref r) => r.clone(),
+ None => panic!("SimulatedBeaconNode: produce_result == None"),
+ }
+ }
+
+ /// Returns the value specified by `set_next_publish_result`.
+ fn publish_beacon_block(&self, block: BeaconBlock) -> PublishResult {
+ *self.publish_input.write().unwrap() = Some(block);
+ match *self.publish_result.read().unwrap() {
+ Some(ref r) => r.clone(),
+ None => panic!("SimulatedBeaconNode: publish_result == None"),
+ }
+ }
+}
diff --git a/eth2/block_producer/src/traits.rs b/eth2/block_producer/src/traits.rs
new file mode 100644
index 000000000..5eb27bce7
--- /dev/null
+++ b/eth2/block_producer/src/traits.rs
@@ -0,0 +1,49 @@
+use types::{BeaconBlock, Signature, Slot};
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum BeaconNodeError {
+ RemoteFailure(String),
+ DecodeFailure,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum PublishOutcome {
+ ValidBlock,
+ InvalidBlock(String),
+}
+
+/// Defines the methods required to produce and publish blocks on a Beacon Node.
+pub trait BeaconNode: Send + Sync {
+ /// Request that the node produces a block.
+ ///
+ /// Returns `Ok(None)` if the Beacon Node is unable to produce at the given slot.
+ fn produce_beacon_block(
+ &self,
+ slot: Slot,
+ randao_reveal: &Signature,
+ ) -> Result<Option<BeaconBlock>, BeaconNodeError>;
+
+ /// Request that the node publishes a block.
+ ///
+ /// Returns the outcome of the publish attempt.
+ fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError>;
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum DutiesReaderError {
+ UnknownValidator,
+ UnknownEpoch,
+ EpochLengthIsZero,
+ Poisoned,
+}
+
+/// Informs a validator of their duties (e.g., block production).
+pub trait DutiesReader: Send + Sync {
+ fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
+}
+
+/// Signs messages using an internally-maintained private key.
+pub trait Signer {
+ fn sign_block_proposal(&self, message: &[u8]) -> Option<Signature>;
+ fn sign_randao_reveal(&self, message: &[u8]) -> Option<Signature>;
+}
diff --git a/eth2/fork_choice/Cargo.toml b/eth2/fork_choice/Cargo.toml
new file mode 100644
index 000000000..566334c76
--- /dev/null
+++ b/eth2/fork_choice/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "fork_choice"
+version = "0.1.0"
+authors = ["Age Manning "]
+edition = "2018"
+
+[dependencies]
+db = { path = "../../beacon_node/db" }
+ssz = { path = "../utils/ssz" }
+types = { path = "../types" }
+fast-math = "0.1.1"
+byteorder = "1.3.1"
diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs
new file mode 100644
index 000000000..f79f7e8c1
--- /dev/null
+++ b/eth2/fork_choice/src/lib.rs
@@ -0,0 +1,118 @@
+// Copyright 2019 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+//! This crate stores the various implementations of fork-choice rules that can be used for the
+//! beacon chain.
+//!
+//! There are four implementations. One is the naive longest-chain rule (primarily for testing
+//! purposes). The other three are proposed implementations of the LMD-GHOST fork-choice rule
+//! with various forms of optimisation.
+//!
+//! The current implementations are:
+//! - [`longest-chain`]: Simplistic longest-chain fork choice - primarily for testing, **not for
+//! production**.
+//! - [`slow_lmd_ghost`]: This is a simple and very inefficient implementation given in the Ethereum 2.0
+//! specifications (https://github.com/ethereum/eth2.0-specs/blob/v0.1/specs/core/0_beacon-chain.md#get_block_root).
+//! - [`optimised_lmd_ghost`]: This is an optimised version of the naive implementation as proposed
+//! by Vitalik. The reference implementation can be found at: https://github.com/ethereum/research/blob/master/ghost/ghost.py
+//! - [`protolambda_lmd_ghost`]: Another optimised version of LMD-GHOST designed by @protolambda.
+//! The Go implementation can be found here: https://github.com/protolambda/lmd-ghost.
+//!
+//! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html
+//! [`optimised_lmd_ghost`]: struct.OptimisedLmdGhost.html
+//! [`protolambda_lmd_ghost`]: struct.ProtolambdaLmdGhost.html
+
+extern crate db;
+extern crate ssz;
+extern crate types;
+
+pub mod longest_chain;
+pub mod optimised_lmd_ghost;
+pub mod protolambda_lmd_ghost;
+pub mod slow_lmd_ghost;
+
+use db::stores::BeaconBlockAtSlotError;
+use db::DBError;
+use types::{BeaconBlock, Hash256};
+
+/// Defines the interface for fork choices. Each fork choice defines its own data structures,
+/// which are built during block processing via the `add_block` and `add_attestation` functions.
+/// The main fork-choice algorithm is specified in `find_head`.
+pub trait ForkChoice: Send + Sync {
+ /// Called when a block has been added. Allows generic block-level data structures to be
+ /// built for a given fork-choice.
+ fn add_block(
+ &mut self,
+ block: &BeaconBlock,
+ block_hash: &Hash256,
+ ) -> Result<(), ForkChoiceError>;
+ /// Called when an attestation has been added. Allows generic attestation-level data
+ /// structures to be built for a given fork choice.
+ // This can be generalised to a full attestation if required later.
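+ // Note: LMD-GHOST only considers each validator's *latest* message, so the
+ // implementations below store one target per validator index and overwrite
+ // it when a newer attestation from the same validator arrives.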
+ fn add_attestation(
+ &mut self,
+ validator_index: u64,
+ target_block_hash: &Hash256,
+ ) -> Result<(), ForkChoiceError>;
+ /// The fork-choice algorithm to find the current canonical head of the chain.
+ // TODO: Remove the justified_start_block parameter and make it internal
+ fn find_head(&mut self, justified_start_block: &Hash256) -> Result<Hash256, ForkChoiceError>;
+}
+
+/// Possible fork choice errors that can occur.
+#[derive(Debug, PartialEq)]
+pub enum ForkChoiceError {
+ MissingBeaconBlock(Hash256),
+ MissingBeaconState(Hash256),
+ IncorrectBeaconState(Hash256),
+ CannotFindBestChild,
+ ChildrenNotFound,
+ StorageError(String),
+}
+
+impl From<DBError> for ForkChoiceError {
+ fn from(e: DBError) -> ForkChoiceError {
+ ForkChoiceError::StorageError(e.message)
+ }
+}
+
+impl From<BeaconBlockAtSlotError> for ForkChoiceError {
+ fn from(e: BeaconBlockAtSlotError) -> ForkChoiceError {
+ match e {
+ BeaconBlockAtSlotError::UnknownBeaconBlock(hash) => {
+ ForkChoiceError::MissingBeaconBlock(hash)
+ }
+ BeaconBlockAtSlotError::InvalidBeaconBlock(hash) => {
+ ForkChoiceError::MissingBeaconBlock(hash)
+ }
+ BeaconBlockAtSlotError::DBError(string) => ForkChoiceError::StorageError(string),
+ }
+ }
+}
+
+/// Fork choice options that are currently implemented.
+pub enum ForkChoiceAlgorithms {
+ /// Chooses the longest chain as the head. Not for production.
+ LongestChain,
+ /// A simple and highly inefficient implementation of LMD-GHOST.
+ SlowLMDGhost,
+ /// An optimised version of LMD-GHOST by Vitalik.
+ OptimisedLMDGhost,
+ /// An optimised version of LMD-GHOST by Protolambda.
+ ProtoLMDGhost,
+}
diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs
new file mode 100644
index 000000000..277d6b950
--- /dev/null
+++ b/eth2/fork_choice/src/longest_chain.rs
@@ -0,0 +1,93 @@
+use db::stores::BeaconBlockStore;
+use db::{ClientDB, DBError};
+use ssz::{Decodable, DecodeError};
+use std::sync::Arc;
+use types::{BeaconBlock, Hash256, Slot};
+
+pub enum ForkChoiceError {
+ BadSszInDatabase,
+ MissingBlock,
+ DBError(String),
+}
+
+pub fn longest_chain<T>(
+ head_block_hashes: &[Hash256],
+ block_store: &Arc<BeaconBlockStore<T>>,
+) -> Result<Option<usize>, ForkChoiceError>
+where
+ T: ClientDB + Sized,
+{
+ let mut head_blocks: Vec<(usize, BeaconBlock)> = vec![];
+
+ /*
+ * Load all the head_block hashes from the DB as SszBeaconBlocks.
+ */
+ for (index, block_hash) in head_block_hashes.iter().enumerate() {
+ let ssz = block_store
+ .get(&block_hash)?
+ .ok_or(ForkChoiceError::MissingBlock)?;
+ let (block, _) = BeaconBlock::ssz_decode(&ssz, 0)?;
+ head_blocks.push((index, block));
+ }
+
+ /*
+ * Loop through all the head blocks and find the highest slot.
+ */
+ let mut highest_slot: Option<Slot> = None;
+ for (_, block) in &head_blocks {
+ let slot = block.slot;
+
+ highest_slot = match highest_slot {
+ None => Some(slot),
+ Some(winning_slot) => {
+ if slot > winning_slot {
+ Some(slot)
+ } else {
+ Some(winning_slot)
+ }
+ }
+ };
+ }
+
+ /*
+ * Loop through all the highest blocks and sort them by highest hash.
+ *
+ * Ultimately, the index of the head_block hash with the highest slot and highest block
+ * hash will be the winner.
+ */ + match highest_slot { + None => Ok(None), + Some(highest_slot) => { + let mut highest_blocks = vec![]; + for (index, block) in head_blocks { + if block.slot == highest_slot { + highest_blocks.push((index, block)) + } + } + + highest_blocks.sort_by(|a, b| head_block_hashes[a.0].cmp(&head_block_hashes[b.0])); + let (index, _) = highest_blocks[0]; + Ok(Some(index)) + } + } +} + +impl From for ForkChoiceError { + fn from(_: DecodeError) -> Self { + ForkChoiceError::BadSszInDatabase + } +} + +impl From for ForkChoiceError { + fn from(e: DBError) -> Self { + ForkChoiceError::DBError(e.message) + } +} + +#[cfg(test)] +mod tests { + #[test] + fn test_naive_fork_choice() { + assert_eq!(2 + 2, 4); + } +} diff --git a/eth2/fork_choice/src/optimised_lmd_ghost.rs b/eth2/fork_choice/src/optimised_lmd_ghost.rs new file mode 100644 index 000000000..7104834cb --- /dev/null +++ b/eth2/fork_choice/src/optimised_lmd_ghost.rs @@ -0,0 +1,443 @@ +// Copyright 2019 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +extern crate byteorder; +extern crate fast_math; +use crate::{ForkChoice, ForkChoiceError}; +use byteorder::{BigEndian, ByteOrder}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, +}; +use fast_math::log2_raw; +use std::collections::HashMap; +use std::sync::Arc; +use types::{ + readers::BeaconBlockReader, + slot_epoch_height::{Height, Slot}, + validator_registry::get_active_validator_indices, + BeaconBlock, Hash256, +}; + +//TODO: Pruning - Children +//TODO: Handle Syncing + +//TODO: Sort out global constants +const GENESIS_SLOT: u64 = 0; +const FORK_CHOICE_BALANCE_INCREMENT: u64 = 1e9 as u64; +const MAX_DEPOSIT_AMOUNT: u64 = 32e9 as u64; +const EPOCH_LENGTH: u64 = 64; + +/// The optimised LMD-GHOST fork choice rule. +/// NOTE: This uses u32 to represent difference between block heights. Thus this is only +/// applicable for block height differences in the range of a u32. +/// This can potentially be parallelized in some parts. +// we use fast log2, a log2 lookup table is implemented in Vitaliks code, potentially do +// the comparison. Log2_raw takes 2ns according to the documentation. +#[inline] +fn log2_int(x: u32) -> u32 { + log2_raw(x as f32) as u32 +} + +fn power_of_2_below(x: u32) -> u32 { + 2u32.pow(log2_int(x)) +} + +/// Stores the necessary data structures to run the optimised lmd ghost algorithm. +pub struct OptimisedLMDGhost { + /// A cache of known ancestors at given heights for a specific block. 
+ //TODO: Consider FnvHashMap + cache: HashMap, Hash256>, + /// Log lookup table for blocks to their ancestors. + //TODO: Verify we only want/need a size 16 log lookup + ancestors: Vec>, + /// Stores the children for any given parent. + children: HashMap>, + /// The latest attestation targets as a map of validator index to block hash. + //TODO: Could this be a fixed size vec + latest_attestation_targets: HashMap, + /// Block storage access. + block_store: Arc>, + /// State storage access. + state_store: Arc>, + max_known_height: Height, +} + +impl OptimisedLMDGhost +where + T: ClientDB + Sized, +{ + pub fn new( + block_store: Arc>, + state_store: Arc>, + ) -> Self { + OptimisedLMDGhost { + cache: HashMap::new(), + ancestors: vec![HashMap::new(); 16], + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + max_known_height: Height::new(0), + block_store, + state_store, + } + } + + /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to + /// weighted votes. + pub fn get_latest_votes( + &self, + state_root: &Hash256, + block_slot: Slot, + ) -> Result, ForkChoiceError> { + // get latest votes + // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) // + // FORK_CHOICE_BALANCE_INCREMENT + // build a hashmap of block_hash to weighted votes + let mut latest_votes: HashMap = HashMap::new(); + // gets the current weighted votes + let current_state = self + .state_store + .get_deserialized(&state_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; + + let active_validator_indices = get_active_validator_indices( + ¤t_state.validator_registry, + block_slot.epoch(EPOCH_LENGTH), + ); + + for index in active_validator_indices { + let balance = + std::cmp::min(current_state.validator_balances[index], MAX_DEPOSIT_AMOUNT) + / FORK_CHOICE_BALANCE_INCREMENT; + if balance > 0 { + if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { + *latest_votes.entry(*target).or_insert_with(|| 0) += balance; + } + } + } + + Ok(latest_votes) + } + + /// Gets the ancestor at a given height `at_height` of a block specified by `block_hash`. + fn get_ancestor(&mut self, block_hash: Hash256, at_height: Height) -> Option { + // return None if we can't get the block from the db. + let block_height = { + let block_slot = self + .block_store + .get_deserialized(&block_hash) + .ok()? 
+ .expect("Should have returned already if None") + .slot; + + block_slot.height(Slot::from(GENESIS_SLOT)) + }; + + // verify we haven't exceeded the block height + if at_height >= block_height { + if at_height > block_height { + return None; + } else { + return Some(block_hash); + } + } + // check if the result is stored in our cache + let cache_key = CacheKey::new(&block_hash, at_height.as_u32()); + if let Some(ancestor) = self.cache.get(&cache_key) { + return Some(*ancestor); + } + + // not in the cache recursively search for ancestors using a log-lookup + + if let Some(ancestor) = { + let ancestor_lookup = self.ancestors + [log2_int((block_height - at_height - 1u64).as_u32()) as usize] + .get(&block_hash) + //TODO: Panic if we can't lookup and fork choice fails + .expect("All blocks should be added to the ancestor log lookup table"); + self.get_ancestor(*ancestor_lookup, at_height) + } { + // add the result to the cache + self.cache.insert(cache_key, ancestor); + return Some(ancestor); + } + + None + } + + // looks for an obvious block winner given the latest votes for a specific height + fn get_clear_winner( + &mut self, + latest_votes: &HashMap, + block_height: Height, + ) -> Option { + // map of vote counts for every hash at this height + let mut current_votes: HashMap = HashMap::new(); + let mut total_vote_count = 0; + + // loop through the latest votes and count all votes + // these have already been weighted by balance + for (hash, votes) in latest_votes.iter() { + if let Some(ancestor) = self.get_ancestor(*hash, block_height) { + let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); + current_votes.insert(ancestor, current_vote_value + *votes); + total_vote_count += votes; + } + } + // Check if there is a clear block winner at this height. If so return it. + for (hash, votes) in current_votes.iter() { + if *votes >= total_vote_count / 2 { + // we have a clear winner, return it + return Some(*hash); + } + } + // didn't find a clear winner + None + } + + // Finds the best child, splitting children into a binary tree, based on their hashes + fn choose_best_child(&self, votes: &HashMap) -> Option { + let mut bitmask = 0; + for bit in (0..=255).rev() { + let mut zero_votes = 0; + let mut one_votes = 0; + let mut single_candidate = None; + + for (candidate, votes) in votes.iter() { + let candidate_uint = BigEndian::read_u32(candidate); + if candidate_uint >> (bit + 1) != bitmask { + continue; + } + if (candidate_uint >> bit) % 2 == 0 { + zero_votes += votes; + } else { + one_votes += votes; + } + + if single_candidate.is_none() { + single_candidate = Some(candidate); + } else { + single_candidate = None; + } + } + bitmask = (bitmask * 2) + { + if one_votes > zero_votes { + 1 + } else { + 0 + } + }; + if let Some(candidate) = single_candidate { + return Some(*candidate); + } + //TODO Remove this during benchmark after testing + assert!(bit >= 1); + } + // should never reach here + None + } +} + +impl ForkChoice for OptimisedLMDGhost { + fn add_block( + &mut self, + block: &BeaconBlock, + block_hash: &Hash256, + ) -> Result<(), ForkChoiceError> { + // get the height of the parent + let parent_height = self + .block_store + .get_deserialized(&block.parent_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? 
+ .slot() + .height(Slot::from(GENESIS_SLOT)); + + let parent_hash = &block.parent_root; + + // add the new block to the children of parent + (*self + .children + .entry(block.parent_root) + .or_insert_with(|| vec![])) + .push(block_hash.clone()); + + // build the ancestor data structure + for index in 0..16 { + if parent_height % (1 << index) == 0 { + self.ancestors[index].insert(*block_hash, *parent_hash); + } else { + // TODO: This is unsafe. Will panic if parent_hash doesn't exist. Using it for debugging + let parent_ancestor = self.ancestors[index][parent_hash]; + self.ancestors[index].insert(*block_hash, parent_ancestor); + } + } + // update the max height + self.max_known_height = std::cmp::max(self.max_known_height, parent_height + 1); + Ok(()) + } + + fn add_attestation( + &mut self, + validator_index: u64, + target_block_root: &Hash256, + ) -> Result<(), ForkChoiceError> { + // simply add the attestation to the latest_attestation_target if the block_height is + // larger + let attestation_target = self + .latest_attestation_targets + .entry(validator_index) + .or_insert_with(|| *target_block_root); + // if we already have a value + if attestation_target != target_block_root { + // get the height of the target block + let block_height = self + .block_store + .get_deserialized(&target_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? + .slot() + .height(Slot::from(GENESIS_SLOT)); + + // get the height of the past target block + let past_block_height = self + .block_store + .get_deserialized(&attestation_target)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? + .slot() + .height(Slot::from(GENESIS_SLOT)); + // update the attestation only if the new target is higher + if past_block_height < block_height { + *attestation_target = *target_block_root; + } + } + Ok(()) + } + + /// Perform lmd_ghost on the current chain to find the head. + fn find_head(&mut self, justified_block_start: &Hash256) -> Result { + let block = self + .block_store + .get_deserialized(&justified_block_start)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; + + let block_slot = block.slot(); + let block_height = block_slot.height(Slot::from(GENESIS_SLOT)); + let state_root = block.state_root(); + + let mut current_head = *justified_block_start; + + let mut latest_votes = self.get_latest_votes(&state_root, block_slot)?; + + // remove any votes that don't relate to our current head. + latest_votes.retain(|hash, _| self.get_ancestor(*hash, block_height) == Some(current_head)); + + // begin searching for the head + loop { + // if there are no children, we are done, return the current_head + let children = match self.children.get(¤t_head) { + Some(children) => children.clone(), + None => return Ok(current_head), + }; + + // logarithmic lookup blocks to see if there are obvious winners, if so, + // progress to the next iteration. + let mut step = + power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u32()) / 2; + while step > 0 { + if let Some(clear_winner) = self.get_clear_winner( + &latest_votes, + block_height - (block_height % u64::from(step)) + u64::from(step), + ) { + current_head = clear_winner; + break; + } + step /= 2; + } + if step > 0 { + } + // if our skip lookup failed and we only have one child, progress to that child + else if children.len() == 1 { + current_head = children[0]; + } + // we need to find the best child path to progress down. 
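+ // (no clear winner emerged from the logarithmic skip search and there is
+ // more than one child, so fall back to `choose_best_child`, which picks a
+ // branch by splitting candidate hashes bit-by-bit)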
+ else { + let mut child_votes = HashMap::new(); + for (voted_hash, vote) in latest_votes.iter() { + // if the latest votes correspond to a child + if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1) { + // add up the votes for each child + *child_votes.entry(child).or_insert_with(|| 0) += vote; + } + } + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + } + + // No head was found, re-iterate + + // update the block height for the next iteration + let block_height = self + .block_store + .get_deserialized(¤t_head)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))? + .slot() + .height(Slot::from(GENESIS_SLOT)); + + // prune the latest votes for votes that are not part of current chosen chain + // more specifically, only keep votes that have head as an ancestor + latest_votes + .retain(|hash, _| self.get_ancestor(*hash, block_height) == Some(current_head)); + } + } +} + +/// Type for storing blocks in a memory cache. Key is comprised of block-hash plus the height. +#[derive(PartialEq, Eq, Hash)] +pub struct CacheKey { + block_hash: Hash256, + block_height: T, +} + +impl CacheKey { + pub fn new(block_hash: &Hash256, block_height: T) -> Self { + CacheKey { + block_hash: *block_hash, + block_height, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn test_power_of_2_below() { + println!("{:?}", std::f32::MAX); + assert_eq!(power_of_2_below(4), 4); + assert_eq!(power_of_2_below(5), 4); + assert_eq!(power_of_2_below(7), 4); + assert_eq!(power_of_2_below(24), 16); + assert_eq!(power_of_2_below(32), 32); + assert_eq!(power_of_2_below(33), 32); + assert_eq!(power_of_2_below(63), 32); + } +} diff --git a/eth2/fork_choice/src/protolambda_lmd_ghost.rs b/eth2/fork_choice/src/protolambda_lmd_ghost.rs new file mode 100644 index 000000000..e69de29bb diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs new file mode 100644 index 000000000..e0e347cef --- /dev/null +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -0,0 +1,223 @@ +// Copyright 2019 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
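+
+//! A naive reference implementation: unlike the optimised variant above, vote
+//! weights are recomputed from scratch for every child on each iteration of
+//! `find_head`, which keeps the code close to the specification at the cost
+//! of efficiency.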
+ +extern crate db; + +use crate::{ForkChoice, ForkChoiceError}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, +}; +use std::collections::HashMap; +use std::sync::Arc; +use types::{ + readers::{BeaconBlockReader, BeaconStateReader}, + slot_epoch_height::Slot, + validator_registry::get_active_validator_indices, + BeaconBlock, Hash256, +}; + +//TODO: Pruning and syncing + +//TODO: Sort out global constants +const GENESIS_SLOT: u64 = 0; +const FORK_CHOICE_BALANCE_INCREMENT: u64 = 1e9 as u64; +const MAX_DEPOSIT_AMOUNT: u64 = 32e9 as u64; +const EPOCH_LENGTH: u64 = 64; + +pub struct SlowLMDGhost { + /// The latest attestation targets as a map of validator index to block hash. + //TODO: Could this be a fixed size vec + latest_attestation_targets: HashMap, + /// Stores the children for any given parent. + children: HashMap>, + /// Block storage access. + block_store: Arc>, + /// State storage access. + state_store: Arc>, +} + +impl SlowLMDGhost +where + T: ClientDB + Sized, +{ + pub fn new(block_store: BeaconBlockStore, state_store: BeaconStateStore) -> Self { + SlowLMDGhost { + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + block_store: Arc::new(block_store), + state_store: Arc::new(state_store), + } + } + + /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to + /// weighted votes. + pub fn get_latest_votes( + &self, + state_root: &Hash256, + block_slot: Slot, + ) -> Result, ForkChoiceError> { + // get latest votes + // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) // + // FORK_CHOICE_BALANCE_INCREMENT + // build a hashmap of block_hash to weighted votes + let mut latest_votes: HashMap = HashMap::new(); + // gets the current weighted votes + let current_state = self + .state_store + .get_deserialized(&state_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; + + let active_validator_indices = get_active_validator_indices( + ¤t_state.validator_registry, + block_slot.epoch(EPOCH_LENGTH), + ); + + for index in active_validator_indices { + let balance = + std::cmp::min(current_state.validator_balances[index], MAX_DEPOSIT_AMOUNT) + / FORK_CHOICE_BALANCE_INCREMENT; + if balance > 0 { + if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { + *latest_votes.entry(*target).or_insert_with(|| 0) += balance; + } + } + } + + Ok(latest_votes) + } + + /// Get the total number of votes for some given block root. + /// + /// The vote count is incremented each time an attestation target votes for a block root. + fn get_vote_count( + &self, + latest_votes: &HashMap, + block_root: &Hash256, + ) -> Result { + let mut count = 0; + let block_slot = self + .block_store + .get_deserialized(&block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? + .slot(); + + for (target_hash, votes) in latest_votes.iter() { + let (root_at_slot, _) = self + .block_store + .block_at_slot(&block_root, block_slot)? 
+ .ok_or(ForkChoiceError::MissingBeaconBlock(*block_root))?; + if root_at_slot == *target_hash { + count += votes; + } + } + Ok(count) + } +} + +impl ForkChoice for SlowLMDGhost { + /// Process when a block is added + fn add_block( + &mut self, + block: &BeaconBlock, + block_hash: &Hash256, + ) -> Result<(), ForkChoiceError> { + // build the children hashmap + // add the new block to the children of parent + (*self + .children + .entry(block.parent_root) + .or_insert_with(|| vec![])) + .push(block_hash.clone()); + + // complete + Ok(()) + } + + fn add_attestation( + &mut self, + validator_index: u64, + target_block_root: &Hash256, + ) -> Result<(), ForkChoiceError> { + // simply add the attestation to the latest_attestation_target if the block_height is + // larger + let attestation_target = self + .latest_attestation_targets + .entry(validator_index) + .or_insert_with(|| *target_block_root); + // if we already have a value + if attestation_target != target_block_root { + // get the height of the target block + let block_height = self + .block_store + .get_deserialized(&target_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? + .slot() + .height(Slot::from(GENESIS_SLOT)); + + // get the height of the past target block + let past_block_height = self + .block_store + .get_deserialized(&attestation_target)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? + .slot() + .height(Slot::from(GENESIS_SLOT)); + // update the attestation only if the new target is higher + if past_block_height < block_height { + *attestation_target = *target_block_root; + } + } + Ok(()) + } + + /// A very inefficient implementation of LMD ghost. + fn find_head(&mut self, justified_block_start: &Hash256) -> Result { + let start = self + .block_store + .get_deserialized(&justified_block_start)? 
+            .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?;
+
+        let start_state_root = start.state_root();
+
+        let latest_votes = self.get_latest_votes(&start_state_root, start.slot())?;
+
+        // Walk the block tree from the justified block, at each step descending to the
+        // child with the most latest-message votes.
+        let mut head_hash = *justified_block_start;
+
+        loop {
+            let children = match self.children.get(&head_hash) {
+                Some(children) => children,
+                // We have found the head: it has no children.
+                None => break,
+            };
+
+            // Default to the first child so the loop always makes progress, even when
+            // no child has received any votes.
+            let mut best_child = match children.first() {
+                Some(child) => *child,
+                None => break,
+            };
+            let mut head_vote_count = self.get_vote_count(&latest_votes, &best_child)?;
+
+            for child_hash in &children[1..] {
+                let vote_count = self.get_vote_count(&latest_votes, child_hash)?;
+
+                if vote_count > head_vote_count {
+                    best_child = *child_hash;
+                    head_vote_count = vote_count;
+                }
+            }
+
+            head_hash = best_child;
+        }
+        Ok(head_hash)
+    }
+}
diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml
new file mode 100644
index 000000000..683475f47
--- /dev/null
+++ b/eth2/state_processing/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "state_processing"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+hashing = { path = "../utils/hashing" }
+integer-sqrt = "0.1"
+log = "0.4"
+ssz = { path = "../utils/ssz" }
+types = { path = "../types" }
+rayon = "1.0"
diff --git a/eth2/state_processing/src/block_processable.rs b/eth2/state_processing/src/block_processable.rs
new file mode 100644
index 000000000..f043a723d
--- /dev/null
+++ b/eth2/state_processing/src/block_processable.rs
@@ -0,0 +1,403 @@
+use crate::SlotProcessingError;
+use hashing::hash;
+use log::debug;
+use ssz::{ssz_encode, TreeHash};
+use types::{
+    beacon_state::{AttestationValidationError, CommitteesError},
+    AggregatePublicKey, Attestation, BeaconBlock, BeaconState, ChainSpec, Crosslink, Epoch, Exit,
+    Fork, Hash256, PendingAttestation, PublicKey, Signature,
+};
+
+// TODO: define elsewhere.
+const DOMAIN_PROPOSAL: u64 = 2;
+const DOMAIN_EXIT: u64 = 3;
+const DOMAIN_RANDAO: u64 = 4;
+const PHASE_0_CUSTODY_BIT: bool = false;
+const DOMAIN_ATTESTATION: u64 = 1;
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    DBError(String),
+    StateAlreadyTransitioned,
+    PresentSlotIsNone,
+    UnableToDecodeBlock,
+    MissingParentState(Hash256),
+    InvalidParentState(Hash256),
+    MissingBeaconBlock(Hash256),
+    InvalidBeaconBlock(Hash256),
+    MissingParentBlock(Hash256),
+    NoBlockProducer,
+    StateSlotMismatch,
+    BadBlockSignature,
+    BadRandaoSignature,
+    MaxProposerSlashingsExceeded,
+    BadProposerSlashing,
+    MaxAttestationsExceeded,
+    InvalidAttestation(AttestationValidationError),
+    NoBlockRoot,
+    MaxDepositsExceeded,
+    MaxExitsExceeded,
+    BadExit,
+    BadCustodyReseeds,
+    BadCustodyChallenges,
+    BadCustodyResponses,
+    CommitteesError(CommitteesError),
+    SlotProcessingError(SlotProcessingError),
+}
+
+macro_rules!
ensure { + ($condition: expr, $result: expr) => { + if !$condition { + return Err($result); + } + }; +} + +pub trait BlockProcessable { + fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error>; + fn per_block_processing_without_verifying_block_signature( + &mut self, + block: &BeaconBlock, + spec: &ChainSpec, + ) -> Result<(), Error>; +} + +impl BlockProcessable for BeaconState { + fn per_block_processing(&mut self, block: &BeaconBlock, spec: &ChainSpec) -> Result<(), Error> { + per_block_processing_signature_optional(self, block, true, spec) + } + + fn per_block_processing_without_verifying_block_signature( + &mut self, + block: &BeaconBlock, + spec: &ChainSpec, + ) -> Result<(), Error> { + per_block_processing_signature_optional(self, block, false, spec) + } +} + +fn per_block_processing_signature_optional( + state: &mut BeaconState, + block: &BeaconBlock, + verify_block_signature: bool, + spec: &ChainSpec, +) -> Result<(), Error> { + ensure!(block.slot == state.slot, Error::StateSlotMismatch); + + /* + * Proposer Signature + */ + let block_proposer_index = state + .get_beacon_proposer_index(block.slot, spec) + .map_err(|_| Error::NoBlockProducer)?; + let block_proposer = &state.validator_registry[block_proposer_index]; + + if verify_block_signature { + ensure!( + bls_verify( + &block_proposer.pubkey, + &block.proposal_root(spec)[..], + &block.signature, + get_domain(&state.fork, state.current_epoch(spec), DOMAIN_PROPOSAL) + ), + Error::BadBlockSignature + ); + } + + /* + * RANDAO + */ + ensure!( + bls_verify( + &block_proposer.pubkey, + &ssz_encode(&state.current_epoch(spec)), + &block.randao_reveal, + get_domain(&state.fork, state.current_epoch(spec), DOMAIN_RANDAO) + ), + Error::BadRandaoSignature + ); + + // TODO: check this is correct. + let new_mix = { + let mut mix = state.latest_randao_mixes + [state.slot.as_usize() % spec.latest_randao_mixes_length] + .to_vec(); + mix.append(&mut ssz_encode(&block.randao_reveal)); + Hash256::from(&hash(&mix)[..]) + }; + + state.latest_randao_mixes[state.slot.as_usize() % spec.latest_randao_mixes_length] = new_mix; + + /* + * Eth1 data + */ + // TODO: Eth1 data processing. 
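+    // A sketch of the RANDAO bookkeeping above (descriptive only; the TODO above notes
+    // it is not yet spec-checked): the stored mix for this slot position becomes
+    //
+    //     new_mix = hash(old_mix ++ ssz_encode(randao_reveal))
+    //
+    // e.g. if `latest_randao_mixes_length` were 8192, a block at slot 8193 would
+    // overwrite the mix at index 1.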
+ + /* + * Proposer slashings + */ + ensure!( + block.body.proposer_slashings.len() as u64 <= spec.max_proposer_slashings, + Error::MaxProposerSlashingsExceeded + ); + for proposer_slashing in &block.body.proposer_slashings { + let proposer = state + .validator_registry + .get(proposer_slashing.proposer_index as usize) + .ok_or(Error::BadProposerSlashing)?; + ensure!( + proposer_slashing.proposal_data_1.slot == proposer_slashing.proposal_data_2.slot, + Error::BadProposerSlashing + ); + ensure!( + proposer_slashing.proposal_data_1.shard == proposer_slashing.proposal_data_2.shard, + Error::BadProposerSlashing + ); + ensure!( + proposer_slashing.proposal_data_1.block_root + != proposer_slashing.proposal_data_2.block_root, + Error::BadProposerSlashing + ); + ensure!( + proposer.penalized_epoch > state.current_epoch(spec), + Error::BadProposerSlashing + ); + ensure!( + bls_verify( + &proposer.pubkey, + &proposer_slashing.proposal_data_1.hash_tree_root(), + &proposer_slashing.proposal_signature_1, + get_domain( + &state.fork, + proposer_slashing + .proposal_data_1 + .slot + .epoch(spec.epoch_length), + DOMAIN_PROPOSAL + ) + ), + Error::BadProposerSlashing + ); + ensure!( + bls_verify( + &proposer.pubkey, + &proposer_slashing.proposal_data_2.hash_tree_root(), + &proposer_slashing.proposal_signature_2, + get_domain( + &state.fork, + proposer_slashing + .proposal_data_2 + .slot + .epoch(spec.epoch_length), + DOMAIN_PROPOSAL + ) + ), + Error::BadProposerSlashing + ); + state.penalize_validator(proposer_slashing.proposer_index as usize, spec)?; + } + + /* + * Attestations + */ + ensure!( + block.body.attestations.len() as u64 <= spec.max_attestations, + Error::MaxAttestationsExceeded + ); + + for attestation in &block.body.attestations { + validate_attestation(&state, attestation, spec)?; + + let pending_attestation = PendingAttestation { + data: attestation.data.clone(), + aggregation_bitfield: attestation.aggregation_bitfield.clone(), + custody_bitfield: attestation.custody_bitfield.clone(), + inclusion_slot: state.slot, + }; + state.latest_attestations.push(pending_attestation); + } + + debug!( + "{} attestations verified & processed.", + block.body.attestations.len() + ); + + /* + * Deposits + */ + ensure!( + block.body.deposits.len() as u64 <= spec.max_deposits, + Error::MaxDepositsExceeded + ); + + // TODO: process deposits. 
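+    // Note: each attestation accepted earlier in this function was recorded as a
+    // `PendingAttestation` with `inclusion_slot: state.slot`, which is what lets
+    // per-epoch processing reward attesters by how quickly they were included.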
+
+    /*
+     * Exits
+     */
+    ensure!(
+        block.body.exits.len() as u64 <= spec.max_exits,
+        Error::MaxExitsExceeded
+    );
+
+    for exit in &block.body.exits {
+        let validator = state
+            .validator_registry
+            .get(exit.validator_index as usize)
+            .ok_or(Error::BadExit)?;
+        ensure!(
+            validator.exit_epoch
+                > state.get_entry_exit_effect_epoch(state.current_epoch(spec), spec),
+            Error::BadExit
+        );
+        ensure!(state.current_epoch(spec) >= exit.epoch, Error::BadExit);
+        let exit_message = {
+            let exit_struct = Exit {
+                epoch: exit.epoch,
+                validator_index: exit.validator_index,
+                signature: spec.empty_signature.clone(),
+            };
+            exit_struct.hash_tree_root()
+        };
+        ensure!(
+            bls_verify(
+                &validator.pubkey,
+                &exit_message,
+                &exit.signature,
+                get_domain(&state.fork, exit.epoch, DOMAIN_EXIT)
+            ),
+            // This verifies an exit signature, so report the exit error.
+            Error::BadExit
+        );
+        state.initiate_validator_exit(exit.validator_index as usize);
+    }
+
+    debug!("State transition complete.");
+
+    Ok(())
+}
+
+pub fn validate_attestation(
+    state: &BeaconState,
+    attestation: &Attestation,
+    spec: &ChainSpec,
+) -> Result<(), AttestationValidationError> {
+    validate_attestation_signature_optional(state, attestation, spec, true)
+}
+
+pub fn validate_attestation_without_signature(
+    state: &BeaconState,
+    attestation: &Attestation,
+    spec: &ChainSpec,
+) -> Result<(), AttestationValidationError> {
+    validate_attestation_signature_optional(state, attestation, spec, false)
+}
+
+fn validate_attestation_signature_optional(
+    state: &BeaconState,
+    attestation: &Attestation,
+    spec: &ChainSpec,
+    verify_signature: bool,
+) -> Result<(), AttestationValidationError> {
+    ensure!(
+        attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
+        AttestationValidationError::IncludedTooEarly
+    );
+    ensure!(
+        attestation.data.slot + spec.epoch_length >= state.slot,
+        AttestationValidationError::IncludedTooLate
+    );
+    if attestation.data.slot >= state.current_epoch_start_slot(spec) {
+        ensure!(
+            attestation.data.justified_epoch == state.justified_epoch,
+            AttestationValidationError::WrongJustifiedSlot
+        );
+    } else {
+        ensure!(
+            attestation.data.justified_epoch == state.previous_justified_epoch,
+            AttestationValidationError::WrongJustifiedSlot
+        );
+    }
+    ensure!(
+        attestation.data.justified_block_root
+            == *state
+                .get_block_root(
+                    attestation
+                        .data
+                        .justified_epoch
+                        .start_slot(spec.epoch_length),
+                    &spec
+                )
+                .ok_or(AttestationValidationError::NoBlockRoot)?,
+        AttestationValidationError::WrongJustifiedRoot
+    );
+    let potential_crosslink = Crosslink {
+        shard_block_root: attestation.data.shard_block_root,
+        epoch: attestation.data.slot.epoch(spec.epoch_length),
+    };
+    ensure!(
+        (attestation.data.latest_crosslink
+            == state.latest_crosslinks[attestation.data.shard as usize])
+            | (attestation.data.latest_crosslink == potential_crosslink),
+        AttestationValidationError::BadLatestCrosslinkRoot
+    );
+    if verify_signature {
+        let participants = state.get_attestation_participants(
+            &attestation.data,
+            &attestation.aggregation_bitfield,
+            spec,
+        )?;
+        let mut group_public_key = AggregatePublicKey::new();
+        for participant in participants {
+            group_public_key.add(
+                state.validator_registry[participant as usize]
+                    .pubkey
+                    .as_raw(),
+            )
+        }
+        ensure!(
+            attestation.verify_signature(
+                &group_public_key,
+                PHASE_0_CUSTODY_BIT,
+                get_domain(
+                    &state.fork,
+                    attestation.data.slot.epoch(spec.epoch_length),
+                    DOMAIN_ATTESTATION,
+                )
+            ),
+            AttestationValidationError::BadSignature
+        );
+    }
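+
+    // Phase 0 has no shard chains, so every attestation must carry a zero shard block
+    // root (enforced below).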
+    ensure!(
+        attestation.data.shard_block_root == spec.zero_hash,
+        AttestationValidationError::ShardBlockRootNotZero
+    );
+    Ok(())
+}
+
+fn get_domain(_fork: &Fork, _epoch: Epoch, _domain_type: u64) -> u64 {
+    // TODO: stubbed out.
+    0
+}
+
+fn bls_verify(pubkey: &PublicKey, message: &[u8], signature: &Signature, _domain: u64) -> bool {
+    // TODO: add domain
+    signature.verify(message, pubkey)
+}
+
+impl From<AttestationValidationError> for Error {
+    fn from(e: AttestationValidationError) -> Error {
+        Error::InvalidAttestation(e)
+    }
+}
+
+impl From<CommitteesError> for Error {
+    fn from(e: CommitteesError) -> Error {
+        Error::CommitteesError(e)
+    }
+}
+
+impl From<SlotProcessingError> for Error {
+    fn from(e: SlotProcessingError) -> Error {
+        Error::SlotProcessingError(e)
+    }
+}
diff --git a/eth2/state_processing/src/epoch_processable.rs b/eth2/state_processing/src/epoch_processable.rs
new file mode 100644
index 000000000..aece61184
--- /dev/null
+++ b/eth2/state_processing/src/epoch_processable.rs
@@ -0,0 +1,716 @@
+use integer_sqrt::IntegerSquareRoot;
+use log::{debug, trace};
+use rayon::prelude::*;
+use ssz::TreeHash;
+use std::collections::{HashMap, HashSet};
+use std::iter::FromIterator;
+use types::{
+    beacon_state::{AttestationParticipantsError, CommitteesError, InclusionError},
+    validator_registry::get_active_validator_indices,
+    BeaconState, ChainSpec, Crosslink, Epoch, Hash256, PendingAttestation,
+};
+
+macro_rules! safe_add_assign {
+    ($a: expr, $b: expr) => {
+        $a = $a.saturating_add($b);
+    };
+}
+macro_rules! safe_sub_assign {
+    ($a: expr, $b: expr) => {
+        $a = $a.saturating_sub($b);
+    };
+}
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    UnableToDetermineProducer,
+    NoBlockRoots,
+    BaseRewardQuotientIsZero,
+    NoRandaoSeed,
+    CommitteesError(CommitteesError),
+    AttestationParticipantsError(AttestationParticipantsError),
+    InclusionError(InclusionError),
+    WinningRootError(WinningRootError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum WinningRootError {
+    NoWinningRoot,
+    AttestationParticipantsError(AttestationParticipantsError),
+}
+
+#[derive(Clone)]
+pub struct WinningRoot {
+    pub shard_block_root: Hash256,
+    pub attesting_validator_indices: Vec<usize>,
+    pub total_balance: u64,
+    pub total_attesting_balance: u64,
+}
+
+pub trait EpochProcessable {
+    fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error>;
+}
+
+impl EpochProcessable for BeaconState {
+    // Cyclomatic complexity is ignored. It would be ideal to split this function apart, however it
+    // remains monolithic to allow for easier spec updates. Once the spec is more stable we can
+    // optimise.
+    #[allow(clippy::cyclomatic_complexity)]
+    fn per_epoch_processing(&mut self, spec: &ChainSpec) -> Result<(), Error> {
+        let current_epoch = self.current_epoch(spec);
+        let previous_epoch = self.previous_epoch(spec);
+        let next_epoch = self.next_epoch(spec);
+
+        debug!(
+            "Starting per-epoch processing on epoch {}...",
+            self.current_epoch(spec)
+        );
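+
+        // The remainder of this function follows the spec's per-epoch order: tally
+        // attestations, update justification and finality, process crosslinks, apply
+        // rewards and penalties, then rotate the validator registry and per-epoch caches.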
+ */ + let active_validator_indices = get_active_validator_indices( + &self.validator_registry, + self.slot.epoch(spec.epoch_length), + ); + let current_total_balance = self.get_total_balance(&active_validator_indices[..], spec); + + trace!( + "{} validators with a total balance of {} wei.", + active_validator_indices.len(), + current_total_balance + ); + + let current_epoch_attestations: Vec<&PendingAttestation> = self + .latest_attestations + .par_iter() + .filter(|a| { + (a.data.slot / spec.epoch_length).epoch(spec.epoch_length) + == self.current_epoch(spec) + }) + .collect(); + + trace!( + "Current epoch attestations: {}", + current_epoch_attestations.len() + ); + + let current_epoch_boundary_attestations: Vec<&PendingAttestation> = + current_epoch_attestations + .par_iter() + .filter( + |a| match self.get_block_root(self.current_epoch_start_slot(spec), spec) { + Some(block_root) => { + (a.data.epoch_boundary_root == *block_root) + && (a.data.justified_epoch == self.justified_epoch) + } + None => unreachable!(), + }, + ) + .cloned() + .collect(); + + let current_epoch_boundary_attester_indices = self + .get_attestation_participants_union(¤t_epoch_boundary_attestations[..], spec)?; + let current_epoch_boundary_attesting_balance = + self.get_total_balance(¤t_epoch_boundary_attester_indices[..], spec); + + trace!( + "Current epoch boundary attesters: {}", + current_epoch_boundary_attester_indices.len() + ); + + /* + * Validators attesting during the previous epoch + */ + + /* + * Validators that made an attestation during the previous epoch + */ + let previous_epoch_attestations: Vec<&PendingAttestation> = self + .latest_attestations + .par_iter() + .filter(|a| { + //TODO: ensure these saturating subs are correct. + (a.data.slot / spec.epoch_length).epoch(spec.epoch_length) + == self.previous_epoch(spec) + }) + .collect(); + + debug!( + "previous epoch attestations: {}", + previous_epoch_attestations.len() + ); + + let previous_epoch_attester_indices = + self.get_attestation_participants_union(&previous_epoch_attestations[..], spec)?; + let previous_total_balance = + self.get_total_balance(&previous_epoch_attester_indices[..], spec); + + /* + * Validators targetting the previous justified slot + */ + let previous_epoch_justified_attestations: Vec<&PendingAttestation> = { + let mut a: Vec<&PendingAttestation> = current_epoch_attestations + .iter() + .filter(|a| a.data.justified_epoch == self.previous_justified_epoch) + .cloned() + .collect(); + let mut b: Vec<&PendingAttestation> = previous_epoch_attestations + .iter() + .filter(|a| a.data.justified_epoch == self.previous_justified_epoch) + .cloned() + .collect(); + a.append(&mut b); + a + }; + + let previous_epoch_justified_attester_indices = self + .get_attestation_participants_union(&previous_epoch_justified_attestations[..], spec)?; + let previous_epoch_justified_attesting_balance = + self.get_total_balance(&previous_epoch_justified_attester_indices[..], spec); + + /* + * Validators justifying the epoch boundary block at the start of the previous epoch + */ + let previous_epoch_boundary_attestations: Vec<&PendingAttestation> = + previous_epoch_justified_attestations + .iter() + .filter( + |a| match self.get_block_root(self.previous_epoch_start_slot(spec), spec) { + Some(block_root) => a.data.epoch_boundary_root == *block_root, + None => unreachable!(), + }, + ) + .cloned() + .collect(); + + let previous_epoch_boundary_attester_indices = self + .get_attestation_participants_union(&previous_epoch_boundary_attestations[..], spec)?; + let 
+        let previous_epoch_boundary_attesting_balance =
+            self.get_total_balance(&previous_epoch_boundary_attester_indices[..], spec);
+
+        /*
+         * Validators attesting to the expected beacon chain head during the previous epoch.
+         */
+        let previous_epoch_head_attestations: Vec<&PendingAttestation> =
+            previous_epoch_attestations
+                .iter()
+                .filter(|a| match self.get_block_root(a.data.slot, spec) {
+                    Some(block_root) => a.data.beacon_block_root == *block_root,
+                    None => unreachable!(),
+                })
+                .cloned()
+                .collect();
+
+        let previous_epoch_head_attester_indices =
+            self.get_attestation_participants_union(&previous_epoch_head_attestations[..], spec)?;
+        let previous_epoch_head_attesting_balance =
+            self.get_total_balance(&previous_epoch_head_attester_indices[..], spec);
+
+        debug!(
+            "previous_epoch_head_attester_balance of {} wei.",
+            previous_epoch_head_attesting_balance
+        );
+
+        /*
+         * Eth1 Data
+         */
+        if self.next_epoch(spec) % spec.eth1_data_voting_period == 0 {
+            for eth1_data_vote in &self.eth1_data_votes {
+                if eth1_data_vote.vote_count * 2 > spec.eth1_data_voting_period {
+                    self.latest_eth1_data = eth1_data_vote.eth1_data.clone();
+                }
+            }
+            self.eth1_data_votes = vec![];
+        }
+
+        /*
+         * Justification
+         */
+
+        let mut new_justified_epoch = self.justified_epoch;
+        self.justification_bitfield <<= 1;
+
+        // If >= 2/3 of the total balance attested to the previous epoch boundary:
+        //
+        // - Set the 2nd bit of the bitfield.
+        // - Set the previous epoch to be justified.
+        if (3 * previous_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
+            self.justification_bitfield |= 2;
+            new_justified_epoch = previous_epoch;
+            trace!(">= 2/3 voted for previous epoch boundary");
+        }
+        // If >= 2/3 of the total balance attested to the current epoch boundary:
+        //
+        // - Set the 1st bit of the bitfield.
+        // - Set the current epoch to be justified.
+        if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) {
+            self.justification_bitfield |= 1;
+            new_justified_epoch = current_epoch;
+            trace!(">= 2/3 voted for current epoch boundary");
+        }
+
+        // If:
+        //
+        // - All three epochs prior to this epoch have been justified.
+        // - The previous justified epoch was three epochs ago.
+        //
+        // Then, set the finalized epoch to be three epochs ago.
+        if ((self.justification_bitfield >> 1) % 8 == 0b111)
+            & (self.previous_justified_epoch == previous_epoch - 2)
+        {
+            self.finalized_epoch = self.previous_justified_epoch;
+            trace!("epoch - 3 was finalized (1st condition).");
+        }
+        // If:
+        //
+        // - Both two epochs prior to this epoch have been justified.
+        // - The previous justified epoch was two epochs ago.
+        //
+        // Then, set the finalized epoch to two epochs ago.
+        if ((self.justification_bitfield >> 1) % 4 == 0b11)
+            & (self.previous_justified_epoch == previous_epoch - 1)
+        {
+            self.finalized_epoch = self.previous_justified_epoch;
+            trace!("epoch - 2 was finalized (2nd condition).");
+        }
+        // If:
+        //
+        // - This epoch and the two prior have been justified.
+        // - The presently justified epoch was two epochs ago.
+        //
+        // Then, set the finalized epoch to two epochs ago.
+        if (self.justification_bitfield % 8 == 0b111) & (self.justified_epoch == previous_epoch - 1)
+        {
+            self.finalized_epoch = self.justified_epoch;
+            trace!("epoch - 2 was finalized (3rd condition).");
+        }
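+        // Worked example (illustrative values): if the bitfield ends in 0b1110 then bits
+        // 1-3 are set, so the three epochs before this one were all justified; if, in
+        // addition, `previous_justified_epoch == previous_epoch - 2`, the first rule
+        // above finalizes that epoch (three epochs before the current one).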
+        // If:
+        //
+        // - This epoch and the epoch prior to it have been justified.
+        // - The presently justified epoch was the previous epoch.
+        //
+        // Then, set the finalized epoch to be the previous epoch.
+        if (self.justification_bitfield % 4 == 0b11) & (self.justified_epoch == previous_epoch) {
+            self.finalized_epoch = self.justified_epoch;
+            trace!("epoch - 1 was finalized (4th condition).");
+        }
+
+        self.previous_justified_epoch = self.justified_epoch;
+        self.justified_epoch = new_justified_epoch;
+
+        debug!(
+            "Finalized epoch {}, justified epoch {}.",
+            self.finalized_epoch, self.justified_epoch
+        );
+
+        /*
+         * Crosslinks
+         */
+
+        // Cached for later lookups.
+        let mut winning_root_for_shards: HashMap<u64, Result<WinningRoot, WinningRootError>> =
+            HashMap::new();
+
+        // for slot in self.slot.saturating_sub(2 * spec.epoch_length)..self.slot {
+        for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
+            let crosslink_committees_at_slot =
+                self.get_crosslink_committees_at_slot(slot, false, spec)?;
+
+            for (crosslink_committee, shard) in crosslink_committees_at_slot {
+                let shard = shard as u64;
+
+                let winning_root = winning_root(
+                    self,
+                    shard,
+                    &current_epoch_attestations,
+                    &previous_epoch_attestations,
+                    spec,
+                );
+
+                if let Ok(winning_root) = &winning_root {
+                    let total_committee_balance =
+                        self.get_total_balance(&crosslink_committee[..], spec);
+
+                    if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
+                        self.latest_crosslinks[shard as usize] = Crosslink {
+                            epoch: current_epoch,
+                            shard_block_root: winning_root.shard_block_root,
+                        }
+                    }
+                }
+                winning_root_for_shards.insert(shard, winning_root);
+            }
+        }
+
+        trace!(
+            "Found {} winning shard roots.",
+            winning_root_for_shards.len()
+        );
+
+        /*
+         * Rewards and Penalties
+         */
+        let base_reward_quotient = previous_total_balance.integer_sqrt();
+        if base_reward_quotient == 0 {
+            return Err(Error::BaseRewardQuotientIsZero);
+        }
+
+        /*
+         * Justification and finalization
+         */
+        let epochs_since_finality = next_epoch - self.finalized_epoch;
+
+        let previous_epoch_justified_attester_indices_hashset: HashSet<usize> =
+            HashSet::from_iter(previous_epoch_justified_attester_indices.iter().cloned());
+        let previous_epoch_boundary_attester_indices_hashset: HashSet<usize> =
+            HashSet::from_iter(previous_epoch_boundary_attester_indices.iter().cloned());
+        let previous_epoch_head_attester_indices_hashset: HashSet<usize> =
+            HashSet::from_iter(previous_epoch_head_attester_indices.iter().cloned());
+        let previous_epoch_attester_indices_hashset: HashSet<usize> =
+            HashSet::from_iter(previous_epoch_attester_indices.iter().cloned());
+        let active_validator_indices_hashset: HashSet<usize> =
+            HashSet::from_iter(active_validator_indices.iter().cloned());
+
+        debug!("previous epoch justified attesters: {}, previous epoch boundary attesters: {}, previous epoch head attesters: {}, previous epoch attesters: {}", previous_epoch_justified_attester_indices.len(), previous_epoch_boundary_attester_indices.len(), previous_epoch_head_attester_indices.len(), previous_epoch_attester_indices.len());
+
+        debug!("{} epochs since finality.", epochs_since_finality);
+
+        if epochs_since_finality <= 4 {
+            for index in 0..self.validator_balances.len() {
+                let base_reward = self.base_reward(index, base_reward_quotient, spec);
+
+                if previous_epoch_justified_attester_indices_hashset.contains(&index) {
+                    safe_add_assign!(
+                        self.validator_balances[index],
+                        base_reward * previous_epoch_justified_attesting_balance
+                            / previous_total_balance
+                    );
+                } else if active_validator_indices_hashset.contains(&index) {
+                    safe_sub_assign!(self.validator_balances[index], base_reward);
+                }
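+
+                // Each matching attester gains base_reward scaled by the share of the
+                // previous-epoch balance that attested; active validators that did not
+                // match lose a full base_reward. The same pattern repeats for the
+                // boundary and head checks below.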
+
+                if previous_epoch_boundary_attester_indices_hashset.contains(&index) {
+                    safe_add_assign!(
+                        self.validator_balances[index],
+                        base_reward * previous_epoch_boundary_attesting_balance
+                            / previous_total_balance
+                    );
+                } else if active_validator_indices_hashset.contains(&index) {
+                    safe_sub_assign!(self.validator_balances[index], base_reward);
+                }
+
+                if previous_epoch_head_attester_indices_hashset.contains(&index) {
+                    safe_add_assign!(
+                        self.validator_balances[index],
+                        base_reward * previous_epoch_head_attesting_balance
+                            / previous_total_balance
+                    );
+                } else if active_validator_indices_hashset.contains(&index) {
+                    safe_sub_assign!(self.validator_balances[index], base_reward);
+                }
+            }
+
+            for index in previous_epoch_attester_indices {
+                let base_reward = self.base_reward(index, base_reward_quotient, spec);
+                let inclusion_distance =
+                    self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
+
+                safe_add_assign!(
+                    self.validator_balances[index],
+                    base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
+                )
+            }
+        } else {
+            for index in 0..self.validator_balances.len() {
+                let inactivity_penalty = self.inactivity_penalty(
+                    index,
+                    epochs_since_finality,
+                    base_reward_quotient,
+                    spec,
+                );
+                if active_validator_indices_hashset.contains(&index) {
+                    if !previous_epoch_justified_attester_indices_hashset.contains(&index) {
+                        safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
+                    }
+                    if !previous_epoch_boundary_attester_indices_hashset.contains(&index) {
+                        safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
+                    }
+                    if !previous_epoch_head_attester_indices_hashset.contains(&index) {
+                        safe_sub_assign!(self.validator_balances[index], inactivity_penalty);
+                    }
+
+                    if self.validator_registry[index].penalized_epoch <= current_epoch {
+                        let base_reward = self.base_reward(index, base_reward_quotient, spec);
+                        safe_sub_assign!(
+                            self.validator_balances[index],
+                            2 * inactivity_penalty + base_reward
+                        );
+                    }
+                }
+            }
+
+            for index in previous_epoch_attester_indices {
+                let base_reward = self.base_reward(index, base_reward_quotient, spec);
+                let inclusion_distance =
+                    self.inclusion_distance(&previous_epoch_attestations, index, spec)?;
+
+                safe_sub_assign!(
+                    self.validator_balances[index],
+                    base_reward
+                        - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
+                );
+            }
+        }
+
+        trace!("Processed validator justification and finalization rewards/penalties.");
+
+        /*
+         * Attestation inclusion
+         */
+        for &index in &previous_epoch_attester_indices_hashset {
+            let inclusion_slot =
+                self.inclusion_slot(&previous_epoch_attestations[..], index, spec)?;
+            let proposer_index = self
+                .get_beacon_proposer_index(inclusion_slot, spec)
+                .map_err(|_| Error::UnableToDetermineProducer)?;
+            let base_reward = self.base_reward(proposer_index, base_reward_quotient, spec);
+            safe_add_assign!(
+                self.validator_balances[proposer_index],
+                base_reward / spec.includer_reward_quotient
+            );
+        }
+
+        trace!(
+            "Previous epoch attesters: {}.",
+            previous_epoch_attester_indices_hashset.len()
+        );
+
+        /*
+         * Crosslinks
+         */
+        for slot in self.previous_epoch(spec).slot_iter(spec.epoch_length) {
+            let crosslink_committees_at_slot =
+                self.get_crosslink_committees_at_slot(slot, false, spec)?;
+
+            for (_crosslink_committee, shard) in crosslink_committees_at_slot {
+                let shard = shard as u64;
+
+                if let Some(Ok(winning_root)) = winning_root_for_shards.get(&shard) {
+                    // TODO: remove the map.
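+                    // Validators that attested to the winning root are rewarded pro-rata
+                    // to its attesting balance; all others lose a base_reward (applied
+                    // once per validator in the loop below).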
+                    let attesting_validator_indices: HashSet<usize> = HashSet::from_iter(
+                        winning_root.attesting_validator_indices.iter().cloned(),
+                    );
+
+                    for index in 0..self.validator_balances.len() {
+                        let base_reward = self.base_reward(index, base_reward_quotient, spec);
+
+                        if attesting_validator_indices.contains(&index) {
+                            safe_add_assign!(
+                                self.validator_balances[index],
+                                base_reward * winning_root.total_attesting_balance
+                                    / winning_root.total_balance
+                            );
+                        } else {
+                            safe_sub_assign!(self.validator_balances[index], base_reward);
+                        }
+                    }
+                }
+            }
+        }
+
+        /*
+         * Ejections
+         */
+        self.process_ejections(spec);
+
+        /*
+         * Validator Registry
+         */
+        self.previous_calculation_epoch = self.current_calculation_epoch;
+        self.previous_epoch_start_shard = self.current_epoch_start_shard;
+        self.previous_epoch_seed = self.current_epoch_seed;
+
+        let should_update_validator_registry = if self.finalized_epoch
+            > self.validator_registry_update_epoch
+        {
+            (0..self.get_current_epoch_committee_count(spec)).all(|i| {
+                let shard = (self.current_epoch_start_shard + i as u64) % spec.shard_count;
+                self.latest_crosslinks[shard as usize].epoch > self.validator_registry_update_epoch
+            })
+        } else {
+            false
+        };
+
+        if should_update_validator_registry {
+            self.update_validator_registry(spec);
+
+            self.current_calculation_epoch = next_epoch;
+            self.current_epoch_start_shard = (self.current_epoch_start_shard
+                + self.get_current_epoch_committee_count(spec) as u64)
+                % spec.shard_count;
+            self.current_epoch_seed = self
+                .generate_seed(self.current_calculation_epoch, spec)
+                .ok_or_else(|| Error::NoRandaoSeed)?;
+        } else {
+            let epochs_since_last_registry_update =
+                current_epoch - self.validator_registry_update_epoch;
+            if (epochs_since_last_registry_update > 1)
+                & epochs_since_last_registry_update.is_power_of_two()
+            {
+                self.current_calculation_epoch = next_epoch;
+                self.current_epoch_seed = self
+                    .generate_seed(self.current_calculation_epoch, spec)
+                    .ok_or_else(|| Error::NoRandaoSeed)?;
+            }
+        }
+
+        self.process_penalties_and_exits(spec);
+
+        self.latest_index_roots[(next_epoch.as_usize() + spec.entry_exit_delay as usize)
+            % spec.latest_index_roots_length] = hash_tree_root(get_active_validator_indices(
+            &self.validator_registry,
+            next_epoch + Epoch::from(spec.entry_exit_delay),
+        ));
+        self.latest_penalized_balances[next_epoch.as_usize() % spec.latest_penalized_exit_length] =
+            self.latest_penalized_balances
+                [current_epoch.as_usize() % spec.latest_penalized_exit_length];
+        self.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = self
+            .get_randao_mix(current_epoch, spec)
+            .and_then(|x| Some(*x))
+            .ok_or_else(|| Error::NoRandaoSeed)?;
+        self.latest_attestations = self
+            .latest_attestations
+            .iter()
+            .filter(|a| a.data.slot.epoch(spec.epoch_length) >= current_epoch)
+            .cloned()
+            .collect();
+
+        debug!("Epoch transition complete.");
+
+        Ok(())
+    }
+}
+
+fn hash_tree_root(input: Vec<usize>) -> Hash256 {
+    Hash256::from(&input.hash_tree_root()[..])
+}
+
+fn winning_root(
+    state: &BeaconState,
+    shard: u64,
+    current_epoch_attestations: &[&PendingAttestation],
+    previous_epoch_attestations: &[&PendingAttestation],
+    spec: &ChainSpec,
+) -> Result<WinningRoot, WinningRootError> {
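+    // Scan every shard-block root referenced by attestations from the current and
+    // previous epochs; the root backed by the most attesting balance wins, with ties
+    // broken by the minimum root.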
+    let mut attestations = current_epoch_attestations.to_vec();
+    attestations.append(&mut previous_epoch_attestations.to_vec());
+
+    let mut candidates: HashMap<Hash256, WinningRoot> = HashMap::new();
+
+    let mut highest_seen_balance = 0;
+
+    for a in &attestations {
+        if a.data.shard != shard {
+            continue;
+        }
+
+        let shard_block_root = &a.data.shard_block_root;
+
+        if candidates.contains_key(shard_block_root) {
+            continue;
+        }
+
+        // TODO: `cargo fmt` makes this rather ugly; tidy up.
+        let attesting_validator_indices = attestations.iter().try_fold::<_, _, Result<
+            _,
+            AttestationParticipantsError,
+        >>(vec![], |mut acc, a| {
+            if (a.data.shard == shard) && (a.data.shard_block_root == *shard_block_root) {
+                acc.append(&mut state.get_attestation_participants(
+                    &a.data,
+                    &a.aggregation_bitfield,
+                    spec,
+                )?);
+            }
+            Ok(acc)
+        })?;
+
+        // TODO: `total_balance` should be the balance of the whole committee for this
+        // shard, not just the attesters; with the data available here both sums are
+        // identical.
+        let total_balance: u64 = attesting_validator_indices
+            .iter()
+            .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
+
+        let total_attesting_balance: u64 = attesting_validator_indices
+            .iter()
+            .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec));
+
+        if total_attesting_balance > highest_seen_balance {
+            highest_seen_balance = total_attesting_balance;
+        }
+
+        let candidate_root = WinningRoot {
+            shard_block_root: *shard_block_root,
+            attesting_validator_indices,
+            total_attesting_balance,
+            total_balance,
+        };
+
+        candidates.insert(*shard_block_root, candidate_root);
+    }
+
+    Ok(candidates
+        .iter()
+        .filter_map(|(_hash, candidate)| {
+            if candidate.total_attesting_balance == highest_seen_balance {
+                Some(candidate)
+            } else {
+                None
+            }
+        })
+        .min_by_key(|candidate| candidate.shard_block_root)
+        .ok_or_else(|| WinningRootError::NoWinningRoot)?
+        // TODO: avoid clone.
+        .clone())
+}
+
+impl From<InclusionError> for Error {
+    fn from(e: InclusionError) -> Error {
+        Error::InclusionError(e)
+    }
+}
+
+impl From<CommitteesError> for Error {
+    fn from(e: CommitteesError) -> Error {
+        Error::CommitteesError(e)
+    }
+}
+
+impl From<AttestationParticipantsError> for Error {
+    fn from(e: AttestationParticipantsError) -> Error {
+        Error::AttestationParticipantsError(e)
+    }
+}
+
+impl From<AttestationParticipantsError> for WinningRootError {
+    fn from(e: AttestationParticipantsError) -> WinningRootError {
+        WinningRootError::AttestationParticipantsError(e)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        assert_eq!(2 + 2, 4);
+    }
+}
diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs
new file mode 100644
index 000000000..18d1f7554
--- /dev/null
+++ b/eth2/state_processing/src/lib.rs
@@ -0,0 +1,10 @@
+mod block_processable;
+mod epoch_processable;
+mod slot_processable;
+
+pub use block_processable::{
+    validate_attestation, validate_attestation_without_signature, BlockProcessable,
+    Error as BlockProcessingError,
+};
+pub use epoch_processable::{EpochProcessable, Error as EpochProcessingError};
+pub use slot_processable::{Error as SlotProcessingError, SlotProcessable};
diff --git a/eth2/state_processing/src/slot_processable.rs b/eth2/state_processing/src/slot_processable.rs
new file mode 100644
index 000000000..7726c5071
--- /dev/null
+++ b/eth2/state_processing/src/slot_processable.rs
@@ -0,0 +1,70 @@
+use crate::{EpochProcessable, EpochProcessingError};
+use types::{beacon_state::CommitteesError, BeaconState, ChainSpec, Hash256};
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    CommitteesError(CommitteesError),
+    EpochProcessingError(EpochProcessingError),
+}
+
+pub trait SlotProcessable {
+    fn per_slot_processing(
+        &mut self,
+        previous_block_root: Hash256,
+        spec: &ChainSpec,
+    ) -> Result<(), Error>;
+}
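+
+// Advances the state by exactly one slot: per-epoch processing fires first whenever the
+// next slot begins a new epoch, then the slot counter, RANDAO mix history and block-root
+// history roll forward.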
+impl SlotProcessable for BeaconState
+where
+    BeaconState: EpochProcessable,
+{
+    fn per_slot_processing(
+        &mut self,
+        previous_block_root: Hash256,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        if (self.slot + 1) % spec.epoch_length == 0 {
+            self.per_epoch_processing(spec)?;
+        }
+
+        self.slot += 1;
+
+        self.latest_randao_mixes[self.slot.as_usize() % spec.latest_randao_mixes_length] =
+            self.latest_randao_mixes[(self.slot.as_usize() - 1) % spec.latest_randao_mixes_length];
+
+        // Block roots.
+        self.latest_block_roots[(self.slot.as_usize() - 1) % spec.latest_block_roots_length] =
+            previous_block_root;
+
+        if self.slot.as_usize() % spec.latest_block_roots_length == 0 {
+            let root = merkle_root(&self.latest_block_roots[..]);
+            self.batched_block_roots.push(root);
+        }
+        Ok(())
+    }
+}
+
+// TODO: stubbed out; should return the Merkle root of `_input`.
+fn merkle_root(_input: &[Hash256]) -> Hash256 {
+    Hash256::zero()
+}
+
+impl From<CommitteesError> for Error {
+    fn from(e: CommitteesError) -> Error {
+        Error::CommitteesError(e)
+    }
+}
+
+impl From<EpochProcessingError> for Error {
+    fn from(e: EpochProcessingError) -> Error {
+        Error::EpochProcessingError(e)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        assert_eq!(2 + 2, 4);
+    }
+}
diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml
new file mode 100644
index 000000000..24aabf148
--- /dev/null
+++ b/eth2/types/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "types"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+bls = { path = "../utils/bls" }
+boolean-bitfield = { path = "../utils/boolean-bitfield" }
+ethereum-types = "0.4.0"
+hashing = { path = "../utils/hashing" }
+honey-badger-split = { path = "../utils/honey-badger-split" }
+log = "0.4"
+rayon = "1.0"
+rand = "0.5.5"
+serde = "1.0"
+serde_derive = "1.0"
+serde_json = "1.0"
+slog = "^2.2.3"
+ssz = { path = "../utils/ssz" }
+vec_shuffle = { path = "../utils/vec_shuffle" }
diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs
new file mode 100644
index 000000000..eb375d490
--- /dev/null
+++ b/eth2/types/src/attestation.rs
@@ -0,0 +1,112 @@
+use super::{AggregatePublicKey, AggregateSignature, AttestationData, Bitfield, Hash256};
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, Clone, PartialEq, Serialize)]
+pub struct Attestation {
+    pub aggregation_bitfield: Bitfield,
+    pub data: AttestationData,
+    pub custody_bitfield: Bitfield,
+    pub aggregate_signature: AggregateSignature,
+}
+
+impl Attestation {
+    pub fn canonical_root(&self) -> Hash256 {
+        Hash256::from(&self.hash_tree_root()[..])
+    }
+
+    pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
+        self.data.signable_message(custody_bit)
+    }
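+
+    // The signed message commits to both the attestation data and the custody bit, so a
+    // signer cannot later present the same data with a different custody bit.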
+    pub fn verify_signature(
+        &self,
+        group_public_key: &AggregatePublicKey,
+        custody_bit: bool,
+        // TODO: use domain.
+        _domain: u64,
+    ) -> bool {
+        self.aggregate_signature
+            .verify(&self.signable_message(custody_bit), group_public_key)
+    }
+}
+
+impl Encodable for Attestation {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.aggregation_bitfield);
+        s.append(&self.data);
+        s.append(&self.custody_bitfield);
+        s.append(&self.aggregate_signature);
+    }
+}
+
+impl Decodable for Attestation {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (aggregation_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
+        let (data, i) = AttestationData::ssz_decode(bytes, i)?;
+        let (custody_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
+        let (aggregate_signature, i) = AggregateSignature::ssz_decode(bytes, i)?;
+
+        let attestation_record = Self {
+            aggregation_bitfield,
+            data,
+            custody_bitfield,
+            aggregate_signature,
+        };
+        Ok((attestation_record, i))
+    }
+}
+
+impl TreeHash for Attestation {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.aggregation_bitfield.hash_tree_root());
+        result.append(&mut self.data.hash_tree_root());
+        result.append(&mut self.custody_bitfield.hash_tree_root());
+        result.append(&mut self.aggregate_signature.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for Attestation {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            data: <_>::random_for_test(rng),
+            aggregation_bitfield: <_>::random_for_test(rng),
+            custody_bitfield: <_>::random_for_test(rng),
+            aggregate_signature: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Attestation::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Attestation::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs
new file mode 100644
index 000000000..702bba416
--- /dev/null
+++ b/eth2/types/src/attestation_data.rs
@@ -0,0 +1,142 @@
+use crate::test_utils::TestRandom;
+use crate::{AttestationDataAndCustodyBit, Crosslink, Epoch, Hash256, Slot};
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
+    8 +  // slot
+    8 +  // shard
+    32 + // beacon_block_root
+    32 + // epoch_boundary_root
+    32 + // shard_block_root
+    32 + // latest_crosslink
+    8 +  // justified_epoch
+    32 // justified_block_root
+};
+
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)]
+pub struct AttestationData {
+    pub slot: Slot,
+    pub shard: u64,
+    pub beacon_block_root: Hash256,
+    pub epoch_boundary_root: Hash256,
+    pub shard_block_root: Hash256,
+    pub latest_crosslink: Crosslink,
+    pub justified_epoch: Epoch,
+    pub justified_block_root: Hash256,
+}
+
+impl Eq for AttestationData {}
+
+impl AttestationData {
+    pub fn canonical_root(&self) -> Hash256 {
+        Hash256::from(&self.hash_tree_root()[..])
+    }
+
+    pub fn signable_message(&self, custody_bit: bool) -> Vec<u8> {
+        let attestation_data_and_custody_bit = AttestationDataAndCustodyBit {
+            data: self.clone(),
+            custody_bit,
+        };
+        attestation_data_and_custody_bit.hash_tree_root()
+    }
+}
+
+impl Encodable for AttestationData {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.slot);
+        s.append(&self.shard);
+        s.append(&self.beacon_block_root);
+        s.append(&self.epoch_boundary_root);
+        s.append(&self.shard_block_root);
+        s.append(&self.latest_crosslink);
+        s.append(&self.justified_epoch);
+        s.append(&self.justified_block_root);
+    }
+}
+
+impl Decodable for AttestationData {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (slot, i) = <_>::ssz_decode(bytes, i)?;
+        let (shard, i) = <_>::ssz_decode(bytes, i)?;
+        let (beacon_block_root, i) = <_>::ssz_decode(bytes, i)?;
+        let (epoch_boundary_root, i) = <_>::ssz_decode(bytes, i)?;
+        let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
+        let (latest_crosslink, i) = <_>::ssz_decode(bytes, i)?;
+        let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?;
+        let (justified_block_root, i) = <_>::ssz_decode(bytes, i)?;
+
+        let attestation_data = AttestationData {
+            slot,
+            shard,
+            beacon_block_root,
+            epoch_boundary_root,
+            shard_block_root,
+            latest_crosslink,
+            justified_epoch,
+            justified_block_root,
+        };
+        Ok((attestation_data, i))
+    }
+}
+
+impl TreeHash for AttestationData {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.slot.hash_tree_root());
+        result.append(&mut self.shard.hash_tree_root());
+        result.append(&mut self.beacon_block_root.hash_tree_root());
+        result.append(&mut self.epoch_boundary_root.hash_tree_root());
+        result.append(&mut self.shard_block_root.hash_tree_root());
+        result.append(&mut self.latest_crosslink.hash_tree_root());
+        result.append(&mut self.justified_epoch.hash_tree_root());
+        result.append(&mut self.justified_block_root.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for AttestationData {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            slot: <_>::random_for_test(rng),
+            shard: <_>::random_for_test(rng),
+            beacon_block_root: <_>::random_for_test(rng),
+            epoch_boundary_root: <_>::random_for_test(rng),
+            shard_block_root: <_>::random_for_test(rng),
+            latest_crosslink: <_>::random_for_test(rng),
+            justified_epoch: <_>::random_for_test(rng),
+            justified_block_root: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = AttestationData::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = AttestationData::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs
new file mode 100644
index 000000000..4e93dd893
--- /dev/null
+++ b/eth2/types/src/attestation_data_and_custody_bit.rs
@@ -0,0 +1,81 @@
+use super::AttestationData;
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, Clone, PartialEq, Default, Serialize)]
+pub struct AttestationDataAndCustodyBit {
+    pub data: AttestationData,
+    pub custody_bit: bool,
+}
+
+impl Encodable for AttestationDataAndCustodyBit {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.data);
+        // TODO: deal with bools
+    }
+}
+
+impl Decodable for AttestationDataAndCustodyBit {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (data, i) = <_>::ssz_decode(bytes, i)?;
+        let custody_bit = false;
+
+        let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { data, custody_bit };
+
+        Ok((attestation_data_and_custody_bit, i))
+    }
+}
+
+impl TreeHash for AttestationDataAndCustodyBit {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.data.hash_tree_root());
+        // TODO: add bool ssz
+        // result.append(custody_bit.hash_tree_root());
+        ssz::hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            data: <_>::random_for_test(rng),
+            // TODO: deal with bools
+            custody_bit: false,
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+
+        let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs
new file mode 100644
index 000000000..0b27d2030
--- /dev/null
+++ b/eth2/types/src/attester_slashing.rs
@@ -0,0 +1,80 @@
+use crate::{test_utils::TestRandom, SlashableAttestation};
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct AttesterSlashing {
+    pub slashable_attestation_1: SlashableAttestation,
+    pub slashable_attestation_2: SlashableAttestation,
+}
+
+impl Encodable for AttesterSlashing {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.slashable_attestation_1);
+        s.append(&self.slashable_attestation_2);
+    }
+}
+
+impl Decodable for AttesterSlashing {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (slashable_attestation_1, i) = <_>::ssz_decode(bytes, i)?;
+        let (slashable_attestation_2, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            AttesterSlashing {
+                slashable_attestation_1,
+                slashable_attestation_2,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for AttesterSlashing {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.slashable_attestation_1.hash_tree_root());
+        result.append(&mut self.slashable_attestation_2.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for AttesterSlashing {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            slashable_attestation_1: <_>::random_for_test(rng),
+            slashable_attestation_2: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = AttesterSlashing::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = AttesterSlashing::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs new file mode 100644 index 000000000..f6977595a --- /dev/null +++ b/eth2/types/src/beacon_block.rs @@ -0,0 +1,155 @@ +use crate::test_utils::TestRandom; +use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, ProposalSignedData, Slot}; +use bls::Signature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct BeaconBlock { + pub slot: Slot, + pub parent_root: Hash256, + pub state_root: Hash256, + pub randao_reveal: Signature, + pub eth1_data: Eth1Data, + pub signature: Signature, + pub body: BeaconBlockBody, +} + +impl BeaconBlock { + /// Produce the first block of the Beacon Chain. + pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { + BeaconBlock { + slot: spec.genesis_slot, + parent_root: spec.zero_hash, + state_root, + randao_reveal: spec.empty_signature.clone(), + eth1_data: Eth1Data { + deposit_root: spec.zero_hash, + block_hash: spec.zero_hash, + }, + signature: spec.empty_signature.clone(), + body: BeaconBlockBody { + proposer_slashings: vec![], + attester_slashings: vec![], + attestations: vec![], + deposits: vec![], + exits: vec![], + }, + } + } + + pub fn canonical_root(&self) -> Hash256 { + Hash256::from(&self.hash_tree_root()[..]) + } + + pub fn proposal_root(&self, spec: &ChainSpec) -> Hash256 { + let block_without_signature_root = { + let mut block_without_signature = self.clone(); + block_without_signature.signature = spec.empty_signature.clone(); + block_without_signature.canonical_root() + }; + + let proposal = ProposalSignedData { + slot: self.slot, + shard: spec.beacon_chain_shard_number, + block_root: block_without_signature_root, + }; + Hash256::from(&proposal.hash_tree_root()[..]) + } +} + +impl Encodable for BeaconBlock { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.slot); + s.append(&self.parent_root); + s.append(&self.state_root); + s.append(&self.randao_reveal); + s.append(&self.eth1_data); + s.append(&self.signature); + s.append(&self.body); + } +} + +impl Decodable for BeaconBlock { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (slot, i) = <_>::ssz_decode(bytes, i)?; + let (parent_root, i) = <_>::ssz_decode(bytes, i)?; + let (state_root, i) = <_>::ssz_decode(bytes, i)?; + let (randao_reveal, i) = <_>::ssz_decode(bytes, i)?; + let (eth1_data, i) = <_>::ssz_decode(bytes, i)?; + let (signature, i) = <_>::ssz_decode(bytes, i)?; + let (body, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + slot, + parent_root, + state_root, + randao_reveal, + eth1_data, + signature, + body, + }, + i, + )) + } +} + +impl TreeHash for BeaconBlock { + fn 
hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.slot.hash_tree_root());
+        result.append(&mut self.parent_root.hash_tree_root());
+        result.append(&mut self.state_root.hash_tree_root());
+        result.append(&mut self.randao_reveal.hash_tree_root());
+        result.append(&mut self.eth1_data.hash_tree_root());
+        result.append(&mut self.signature.hash_tree_root());
+        result.append(&mut self.body.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for BeaconBlock {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            slot: <_>::random_for_test(rng),
+            parent_root: <_>::random_for_test(rng),
+            state_root: <_>::random_for_test(rng),
+            randao_reveal: <_>::random_for_test(rng),
+            eth1_data: <_>::random_for_test(rng),
+            signature: <_>::random_for_test(rng),
+            body: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = BeaconBlock::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = BeaconBlock::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs
new file mode 100644
index 000000000..d3a61f7ba
--- /dev/null
+++ b/eth2/types/src/beacon_block_body.rs
@@ -0,0 +1,99 @@
+use super::{Attestation, AttesterSlashing, Deposit, Exit, ProposerSlashing};
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, PartialEq, Clone, Default, Serialize)]
+pub struct BeaconBlockBody {
+    pub proposer_slashings: Vec<ProposerSlashing>,
+    pub attester_slashings: Vec<AttesterSlashing>,
+    pub attestations: Vec<Attestation>,
+    pub deposits: Vec<Deposit>,
+    pub exits: Vec<Exit>,
+}
+
+impl Encodable for BeaconBlockBody {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append_vec(&self.proposer_slashings);
+        s.append_vec(&self.attester_slashings);
+        s.append_vec(&self.attestations);
+        s.append_vec(&self.deposits);
+        s.append_vec(&self.exits);
+    }
+}
+
+impl Decodable for BeaconBlockBody {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (proposer_slashings, i) = <_>::ssz_decode(bytes, i)?;
+        let (attester_slashings, i) = <_>::ssz_decode(bytes, i)?;
+        let (attestations, i) = <_>::ssz_decode(bytes, i)?;
+        let (deposits, i) = <_>::ssz_decode(bytes, i)?;
+        let (exits, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            Self {
+                proposer_slashings,
+                attester_slashings,
+                attestations,
+                deposits,
+                exits,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for BeaconBlockBody {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.proposer_slashings.hash_tree_root());
+        result.append(&mut self.attester_slashings.hash_tree_root());
+        result.append(&mut self.attestations.hash_tree_root());
+        result.append(&mut self.deposits.hash_tree_root());
+        result.append(&mut self.exits.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for BeaconBlockBody {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            proposer_slashings: <_>::random_for_test(rng),
+            attester_slashings: <_>::random_for_test(rng),
+            attestations: <_>::random_for_test(rng),
+            deposits: <_>::random_for_test(rng),
+            exits: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = BeaconBlockBody::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = BeaconBlockBody::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs
new file mode 100644
index 000000000..ed53bfea9
--- /dev/null
+++ b/eth2/types/src/beacon_state.rs
@@ -0,0 +1,1121 @@
+use crate::test_utils::TestRandom;
+use crate::{
+    validator::StatusFlags, validator_registry::get_active_validator_indices, AttestationData,
+    Bitfield, ChainSpec, Crosslink, Deposit, Epoch, Eth1Data, Eth1DataVote, Fork, Hash256,
+    PendingAttestation, PublicKey, Signature, Slot, Validator,
+};
+use bls::verify_proof_of_possession;
+use honey_badger_split::SplitExt;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+use std::ops::Range;
+use vec_shuffle::shuffle;
+
+pub enum Error {
+    InsufficientValidators,
+    BadBlockSignature,
+    InvalidEpoch(Slot, Range<Slot>),
+    CommitteesError(CommitteesError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum CommitteesError {
+    InvalidEpoch,
+    InsufficientNumberOfValidators,
+    BadRandao,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum InclusionError {
+    NoIncludedAttestations,
+    AttestationParticipantsError(AttestationParticipantsError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum AttestationParticipantsError {
+    NoCommitteeForShard,
+    NoCommittees,
+    BadBitfieldLength,
+    CommitteesError(CommitteesError),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum AttestationValidationError {
+    IncludedTooEarly,
+    IncludedTooLate,
+    WrongJustifiedSlot,
+    WrongJustifiedRoot,
+    BadLatestCrosslinkRoot,
+    BadSignature,
+    ShardBlockRootNotZero,
+    NoBlockRoot,
+    AttestationParticipantsError(AttestationParticipantsError),
+}
+
+macro_rules! safe_add_assign {
+    ($a: expr, $b: expr) => {
+        $a = $a.saturating_add($b);
+    };
+}
safe_sub_assign { + ($a: expr, $b: expr) => { + $a = $a.saturating_sub($b); + }; +} + +#[derive(Debug, PartialEq, Clone, Default, Serialize)] +pub struct BeaconState { + // Misc + pub slot: Slot, + pub genesis_time: u64, + pub fork: Fork, + + // Validator registry + pub validator_registry: Vec, + pub validator_balances: Vec, + pub validator_registry_update_epoch: Epoch, + + // Randomness and committees + pub latest_randao_mixes: Vec, + pub previous_epoch_start_shard: u64, + pub current_epoch_start_shard: u64, + pub previous_calculation_epoch: Epoch, + pub current_calculation_epoch: Epoch, + pub previous_epoch_seed: Hash256, + pub current_epoch_seed: Hash256, + + // Finality + pub previous_justified_epoch: Epoch, + pub justified_epoch: Epoch, + pub justification_bitfield: u64, + pub finalized_epoch: Epoch, + + // Recent state + pub latest_crosslinks: Vec, + pub latest_block_roots: Vec, + pub latest_index_roots: Vec, + pub latest_penalized_balances: Vec, + pub latest_attestations: Vec, + pub batched_block_roots: Vec, + + // Ethereum 1.0 chain data + pub latest_eth1_data: Eth1Data, + pub eth1_data_votes: Vec, +} + +impl BeaconState { + /// Produce the first state of the Beacon Chain. + pub fn genesis( + genesis_time: u64, + initial_validator_deposits: Vec, + latest_eth1_data: Eth1Data, + spec: &ChainSpec, + ) -> BeaconState { + let initial_crosslink = Crosslink { + epoch: spec.genesis_epoch, + shard_block_root: spec.zero_hash, + }; + + let mut genesis_state = BeaconState { + /* + * Misc + */ + slot: spec.genesis_slot, + genesis_time, + fork: Fork { + previous_version: spec.genesis_fork_version, + current_version: spec.genesis_fork_version, + epoch: spec.genesis_epoch, + }, + + /* + * Validator registry + */ + validator_registry: vec![], // Set later in the function. + validator_balances: vec![], // Set later in the function. 
+            validator_registry_update_epoch: spec.genesis_epoch,
+
+            /*
+             * Randomness and committees
+             */
+            latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize],
+            previous_epoch_start_shard: spec.genesis_start_shard,
+            current_epoch_start_shard: spec.genesis_start_shard,
+            previous_calculation_epoch: spec.genesis_epoch,
+            current_calculation_epoch: spec.genesis_epoch,
+            previous_epoch_seed: spec.zero_hash,
+            current_epoch_seed: spec.zero_hash,
+
+            /*
+             * Finality
+             */
+            previous_justified_epoch: spec.genesis_epoch,
+            justified_epoch: spec.genesis_epoch,
+            justification_bitfield: 0,
+            finalized_epoch: spec.genesis_epoch,
+
+            /*
+             * Recent state
+             */
+            latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize],
+            latest_block_roots: vec![spec.zero_hash; spec.latest_block_roots_length as usize],
+            latest_index_roots: vec![spec.zero_hash; spec.latest_index_roots_length as usize],
+            latest_penalized_balances: vec![0; spec.latest_penalized_exit_length as usize],
+            latest_attestations: vec![],
+            batched_block_roots: vec![],
+
+            /*
+             * Ethereum 1.0 chain data
+             */
+            latest_eth1_data,
+            eth1_data_votes: vec![],
+        };
+
+        for deposit in initial_validator_deposits {
+            let _index = genesis_state.process_deposit(
+                deposit.deposit_data.deposit_input.pubkey,
+                deposit.deposit_data.amount,
+                deposit.deposit_data.deposit_input.proof_of_possession,
+                deposit.deposit_data.deposit_input.withdrawal_credentials,
+                spec,
+            );
+        }
+
+        for validator_index in 0..genesis_state.validator_registry.len() {
+            if genesis_state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount
+            {
+                genesis_state.activate_validator(validator_index, true, spec);
+            }
+        }
+
+        let genesis_active_index_root = hash_tree_root(get_active_validator_indices(
+            &genesis_state.validator_registry,
+            spec.genesis_epoch,
+        ));
+        genesis_state.latest_index_roots =
+            vec![genesis_active_index_root; spec.latest_index_roots_length];
+        genesis_state.current_epoch_seed = genesis_state
+            .generate_seed(spec.genesis_epoch, spec)
+            .expect("Unable to generate seed.");
+
+        genesis_state
+    }
+
+    /// Return the tree hash root for this `BeaconState`.
+    ///
+    /// Spec v0.2.0
+    pub fn canonical_root(&self) -> Hash256 {
+        Hash256::from(&self.hash_tree_root()[..])
+    }
+
+    /// The epoch corresponding to `self.slot`.
+    ///
+    /// Spec v0.2.0
+    pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch {
+        self.slot.epoch(spec.epoch_length)
+    }
+
+    /// The epoch prior to `self.current_epoch()`.
+    ///
+    /// Spec v0.2.0
+    pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch {
+        self.current_epoch(spec).saturating_sub(1_u64)
+    }
+
+    /// The epoch following `self.current_epoch()`.
+    ///
+    /// Spec v0.2.0
+    pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch {
+        self.current_epoch(spec).saturating_add(1_u64)
+    }
+
+    /// The first slot of the epoch corresponding to `self.slot`.
+    ///
+    /// Spec v0.2.0
+    pub fn current_epoch_start_slot(&self, spec: &ChainSpec) -> Slot {
+        self.current_epoch(spec).start_slot(spec.epoch_length)
+    }
+
+    /// The first slot of the epoch preceding the one corresponding to `self.slot`.
+    ///
+    /// Spec v0.2.0
+    pub fn previous_epoch_start_slot(&self, spec: &ChainSpec) -> Slot {
+        self.previous_epoch(spec).start_slot(spec.epoch_length)
+    }
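+
+    // Illustrative sketch of the slot/epoch helpers above (values assumed, not
+    // taken from this PR): with `epoch_length = 64`, a state at slot 200 is in
+    // epoch 3 (200 / 64) and `current_epoch_start_slot()` returns 192 (3 * 64).
+    // `previous_epoch()` and `next_epoch()` saturate, so epoch 0 cannot underflow.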
+
+    /// Return the number of committees in one epoch.
+    ///
+    /// TODO: this should probably be a method on `ChainSpec`.
+    ///
+    /// Spec v0.2.0
+    pub fn get_epoch_committee_count(
+        &self,
+        active_validator_count: usize,
+        spec: &ChainSpec,
+    ) -> u64 {
+        std::cmp::max(
+            1,
+            std::cmp::min(
+                spec.shard_count / spec.epoch_length,
+                active_validator_count as u64 / spec.epoch_length / spec.target_committee_size,
+            ),
+        ) * spec.epoch_length
+    }
+
+    /// Shuffle ``validators`` into crosslink committees seeded by ``seed`` and ``epoch``.
+    /// Return a list of ``committees_per_epoch`` committees where each
+    /// committee is itself a list of validator indices.
+    ///
+    /// Spec v0.1
+    pub fn get_shuffling(&self, seed: Hash256, epoch: Epoch, spec: &ChainSpec) -> Vec<Vec<usize>> {
+        let active_validator_indices =
+            get_active_validator_indices(&self.validator_registry, epoch);
+
+        let committees_per_epoch =
+            self.get_epoch_committee_count(active_validator_indices.len(), spec);
+
+        // TODO: check that Hash256::from(u64) matches 'int_to_bytes32'.
+        let seed = seed ^ Hash256::from(epoch.as_u64());
+        // TODO: fix `expect` assert.
+        let shuffled_active_validator_indices =
+            shuffle(&seed, active_validator_indices).expect("Max validator count exceeded!");
+
+        shuffled_active_validator_indices
+            .honey_badger_split(committees_per_epoch as usize)
+            .map(|slice: &[usize]| slice.to_vec())
+            .collect()
+    }
+
+    /// Return the number of committees in the previous epoch.
+    ///
+    /// Spec v0.2.0
+    fn get_previous_epoch_committee_count(&self, spec: &ChainSpec) -> u64 {
+        let previous_active_validators =
+            get_active_validator_indices(&self.validator_registry, self.previous_calculation_epoch);
+        self.get_epoch_committee_count(previous_active_validators.len(), spec)
+    }
+
+    /// Return the number of committees in the current epoch.
+    ///
+    /// Spec v0.2.0
+    pub fn get_current_epoch_committee_count(&self, spec: &ChainSpec) -> u64 {
+        let current_active_validators =
+            get_active_validator_indices(&self.validator_registry, self.current_calculation_epoch);
+        self.get_epoch_committee_count(current_active_validators.len(), spec)
+    }
+
+    /// Return the number of committees in the next epoch.
+    ///
+    /// Spec v0.2.0
+    pub fn get_next_epoch_committee_count(&self, spec: &ChainSpec) -> u64 {
+        let current_active_validators =
+            get_active_validator_indices(&self.validator_registry, self.next_epoch(spec));
+        self.get_epoch_committee_count(current_active_validators.len(), spec)
+    }
+
+    pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Option<Hash256> {
+        let current_epoch = self.current_epoch(spec);
+
+        let earliest_index_root = current_epoch - Epoch::from(spec.latest_index_roots_length)
+            + Epoch::from(spec.entry_exit_delay)
+            + 1;
+        let latest_index_root = current_epoch + spec.entry_exit_delay;
+
+        // The requested epoch must fall inside the window of stored index roots.
+        if (epoch >= earliest_index_root) & (epoch <= latest_index_root) {
+            Some(self.latest_index_roots[epoch.as_usize() % spec.latest_index_roots_length])
+        } else {
+            None
+        }
+    }
+
+    /// Generate a seed for the given ``epoch``.
+    ///
+    /// Spec v0.2.0
+    pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Option<Hash256> {
+        let mut input = self.get_randao_mix(epoch, spec)?.to_vec();
+        input.append(&mut self.get_active_index_root(epoch, spec)?.to_vec());
+        // TODO: ensure `Hash256::from(u64)` == `int_to_bytes32`.
+        input.append(&mut Hash256::from(epoch.as_u64()).to_vec());
+        Some(Hash256::from(&hash(&input[..])[..]))
+    }
+
+    /// Return the list of ``(committee, shard)`` tuples for the ``slot``.
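+    ///
+    /// Illustrative sizing (assumed values, not from this PR): with
+    /// `shard_count = 1024`, `epoch_length = 64` and `target_committee_size = 128`,
+    /// 131_072 active validators give `min(1024 / 64, 131_072 / 64 / 128) * 64 = 1024`
+    /// committees per epoch, i.e. 16 committees at each slot.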
+ /// + /// Note: There are two possible shufflings for crosslink committees for a + /// `slot` in the next epoch: with and without a `registry_change` + /// + /// Spec v0.2.0 + pub fn get_crosslink_committees_at_slot( + &self, + slot: Slot, + registry_change: bool, + spec: &ChainSpec, + ) -> Result, u64)>, CommitteesError> { + let epoch = slot.epoch(spec.epoch_length); + let current_epoch = self.current_epoch(spec); + let previous_epoch = if current_epoch == spec.genesis_epoch { + current_epoch + } else { + current_epoch.saturating_sub(1_u64) + }; + let next_epoch = self.next_epoch(spec); + + let (committees_per_epoch, seed, shuffling_epoch, shuffling_start_shard) = + if epoch == previous_epoch { + ( + self.get_previous_epoch_committee_count(spec), + self.previous_epoch_seed, + self.previous_calculation_epoch, + self.previous_epoch_start_shard, + ) + } else if epoch == current_epoch { + ( + self.get_current_epoch_committee_count(spec), + self.current_epoch_seed, + self.current_calculation_epoch, + self.current_epoch_start_shard, + ) + } else if epoch == next_epoch { + let current_committees_per_epoch = self.get_current_epoch_committee_count(spec); + let epochs_since_last_registry_update = + current_epoch - self.validator_registry_update_epoch; + let (seed, shuffling_start_shard) = if registry_change { + let next_seed = self + .generate_seed(next_epoch, spec) + .ok_or_else(|| CommitteesError::BadRandao)?; + ( + next_seed, + (self.current_epoch_start_shard + current_committees_per_epoch) + % spec.shard_count, + ) + } else if (epochs_since_last_registry_update > 1) + & epochs_since_last_registry_update.is_power_of_two() + { + let next_seed = self + .generate_seed(next_epoch, spec) + .ok_or_else(|| CommitteesError::BadRandao)?; + (next_seed, self.current_epoch_start_shard) + } else { + (self.current_epoch_seed, self.current_epoch_start_shard) + }; + ( + self.get_next_epoch_committee_count(spec), + seed, + next_epoch, + shuffling_start_shard, + ) + } else { + panic!("Epoch out-of-bounds.") + }; + + let shuffling = self.get_shuffling(seed, shuffling_epoch, spec); + let offset = slot.as_u64() % spec.epoch_length; + let committees_per_slot = committees_per_epoch / spec.epoch_length; + let slot_start_shard = + (shuffling_start_shard + committees_per_slot * offset) % spec.shard_count; + + let mut crosslinks_at_slot = vec![]; + for i in 0..committees_per_slot { + let tuple = ( + shuffling[(committees_per_slot * offset + i) as usize].clone(), + (slot_start_shard + i) % spec.shard_count, + ); + crosslinks_at_slot.push(tuple) + } + Ok(crosslinks_at_slot) + } + + /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an + /// attestation. + /// + /// Spec v0.2.0 + pub fn attestation_slot_and_shard_for_validator( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result, CommitteesError> { + let mut result = None; + for slot in self.current_epoch(spec).slot_iter(spec.epoch_length) { + for (committee, shard) in self.get_crosslink_committees_at_slot(slot, false, spec)? { + if let Some(committee_index) = committee.iter().position(|&i| i == validator_index) + { + result = Some((slot, shard, committee_index as u64)); + } + } + } + Ok(result) + } + + /// An entry or exit triggered in the ``epoch`` given by the input takes effect at + /// the epoch given by the output. 
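+    ///
+    /// Illustrative example (assumed delay): with `entry_exit_delay = 4`, an
+    /// exit triggered in epoch 10 takes effect at epoch 15 (`10 + 1 + 4`).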
+    ///
+    /// Spec v0.2.0
+    pub fn get_entry_exit_effect_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch {
+        epoch + 1 + spec.entry_exit_delay
+    }
+
+    /// Returns the beacon proposer index for the `slot`.
+    ///
+    /// If the state does not contain an index for a beacon proposer at the requested `slot`, then an `Err` is returned.
+    ///
+    /// Spec v0.2.0
+    pub fn get_beacon_proposer_index(
+        &self,
+        slot: Slot,
+        spec: &ChainSpec,
+    ) -> Result<usize, CommitteesError> {
+        let committees = self.get_crosslink_committees_at_slot(slot, false, spec)?;
+        committees
+            .first()
+            .ok_or(CommitteesError::InsufficientNumberOfValidators)
+            .and_then(|(first_committee, _)| {
+                let index = (slot.as_usize())
+                    .checked_rem(first_committee.len())
+                    .ok_or(CommitteesError::InsufficientNumberOfValidators)?;
+                // NOTE: indexing will not panic as we have already returned if the committee is empty.
+                Ok(first_committee[index])
+            })
+    }
+
+    /// Process the penalties and prepare the validators who are eligible for withdrawal.
+    ///
+    /// Spec v0.2.0
+    pub fn process_penalties_and_exits(&mut self, spec: &ChainSpec) {
+        let current_epoch = self.current_epoch(spec);
+        let active_validator_indices =
+            get_active_validator_indices(&self.validator_registry, current_epoch);
+        let total_balance = self.get_total_balance(&active_validator_indices[..], spec);
+
+        for index in 0..self.validator_balances.len() {
+            let validator = &self.validator_registry[index];
+
+            if current_epoch
+                == validator.penalized_epoch + Epoch::from(spec.latest_penalized_exit_length / 2)
+            {
+                let epoch_index: usize =
+                    current_epoch.as_usize() % spec.latest_penalized_exit_length;
+
+                let total_at_start = self.latest_penalized_balances
+                    [(epoch_index + 1) % spec.latest_penalized_exit_length];
+                let total_at_end = self.latest_penalized_balances[epoch_index];
+                let total_penalties = total_at_end.saturating_sub(total_at_start);
+                let penalty = self.get_effective_balance(index, spec)
+                    * std::cmp::min(total_penalties * 3, total_balance)
+                    / total_balance;
+                safe_sub_assign!(self.validator_balances[index], penalty);
+            }
+        }
+
+        let eligible = |index: usize| {
+            let validator = &self.validator_registry[index];
+
+            if validator.penalized_epoch <= current_epoch {
+                let penalized_withdrawal_epochs = spec.latest_penalized_exit_length / 2;
+                current_epoch >= validator.penalized_epoch + penalized_withdrawal_epochs as u64
+            } else {
+                current_epoch >= validator.exit_epoch + spec.min_validator_withdrawal_epochs
+            }
+        };
+
+        let mut eligible_indices: Vec<usize> = (0..self.validator_registry.len())
+            .filter(|i| eligible(*i))
+            .collect();
+        eligible_indices.sort_by_key(|i| self.validator_registry[*i].exit_epoch);
+        for (withdrawn_so_far, index) in eligible_indices.iter().enumerate() {
+            self.prepare_validator_for_withdrawal(*index);
+            if withdrawn_so_far as u64 >= spec.max_withdrawals_per_epoch {
+                break;
+            }
+        }
+    }
+
+    /// Return the randao mix at a recent ``epoch``.
+    ///
+    /// Returns `None` if the epoch is out-of-bounds of `self.latest_randao_mixes`.
+    ///
+    /// Spec v0.2.0
+    pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Option<&Hash256> {
+        self.latest_randao_mixes
+            .get(epoch.as_usize() % spec.latest_randao_mixes_length)
+    }
+
+    /// Update the validator registry, activating/exiting validators if possible.
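+    ///
+    /// Illustrative churn bound (assumed values): with `max_deposit_amount` =
+    /// 32 ETH (in Gwei) and `max_balance_churn_quotient = 32`, a total active
+    /// balance of 10_240 ETH permits roughly `max(32, 10_240 / 64) = 160` ETH
+    /// of activations, and separately of exits, per registry update.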
+ /// + /// Spec v0.2.0 + pub fn update_validator_registry(&mut self, spec: &ChainSpec) { + let current_epoch = self.current_epoch(spec); + let active_validator_indices = + get_active_validator_indices(&self.validator_registry, current_epoch); + let total_balance = self.get_total_balance(&active_validator_indices[..], spec); + + let max_balance_churn = std::cmp::max( + spec.max_deposit_amount, + total_balance / (2 * spec.max_balance_churn_quotient), + ); + + let mut balance_churn = 0; + for index in 0..self.validator_registry.len() { + let validator = &self.validator_registry[index]; + + if (validator.activation_epoch > self.get_entry_exit_effect_epoch(current_epoch, spec)) + && self.validator_balances[index] >= spec.max_deposit_amount + { + balance_churn += self.get_effective_balance(index, spec); + if balance_churn > max_balance_churn { + break; + } + self.activate_validator(index, false, spec); + } + } + + let mut balance_churn = 0; + for index in 0..self.validator_registry.len() { + let validator = &self.validator_registry[index]; + + if (validator.exit_epoch > self.get_entry_exit_effect_epoch(current_epoch, spec)) + && validator.status_flags == Some(StatusFlags::InitiatedExit) + { + balance_churn += self.get_effective_balance(index, spec); + if balance_churn > max_balance_churn { + break; + } + + self.exit_validator(index, spec); + } + } + + self.validator_registry_update_epoch = current_epoch; + } + /// Process a validator deposit, returning the validator index if the deposit is valid. + /// + /// Spec v0.2.0 + pub fn process_deposit( + &mut self, + pubkey: PublicKey, + amount: u64, + proof_of_possession: Signature, + withdrawal_credentials: Hash256, + spec: &ChainSpec, + ) -> Result { + // TODO: ensure verify proof-of-possession represents the spec accurately. + if !verify_proof_of_possession(&proof_of_possession, &pubkey) { + return Err(()); + } + + if let Some(index) = self + .validator_registry + .iter() + .position(|v| v.pubkey == pubkey) + { + if self.validator_registry[index].withdrawal_credentials == withdrawal_credentials { + safe_add_assign!(self.validator_balances[index], amount); + Ok(index) + } else { + Err(()) + } + } else { + let validator = Validator { + pubkey, + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawal_epoch: spec.far_future_epoch, + penalized_epoch: spec.far_future_epoch, + status_flags: None, + }; + self.validator_registry.push(validator); + self.validator_balances.push(amount); + Ok(self.validator_registry.len() - 1) + } + } + + /// Activate the validator of the given ``index``. + /// + /// Spec v0.2.0 + pub fn activate_validator( + &mut self, + validator_index: usize, + is_genesis: bool, + spec: &ChainSpec, + ) { + let current_epoch = self.current_epoch(spec); + + self.validator_registry[validator_index].activation_epoch = if is_genesis { + spec.genesis_epoch + } else { + self.get_entry_exit_effect_epoch(current_epoch, spec) + } + } + + /// Initiate an exit for the validator of the given `index`. + /// + /// Spec v0.2.0 + pub fn initiate_validator_exit(&mut self, validator_index: usize) { + // TODO: the spec does an `|=` here, ensure this isn't buggy. + self.validator_registry[validator_index].status_flags = Some(StatusFlags::InitiatedExit); + } + + /// Exit the validator of the given `index`. 
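+    ///
+    /// This is a no-op if the validator is already scheduled to exit at or
+    /// before the standard entry/exit delay (see `get_entry_exit_effect_epoch`).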
+    ///
+    /// Spec v0.2.0
+    fn exit_validator(&mut self, validator_index: usize, spec: &ChainSpec) {
+        let current_epoch = self.current_epoch(spec);
+
+        if self.validator_registry[validator_index].exit_epoch
+            <= self.get_entry_exit_effect_epoch(current_epoch, spec)
+        {
+            return;
+        }
+
+        self.validator_registry[validator_index].exit_epoch =
+            self.get_entry_exit_effect_epoch(current_epoch, spec);
+    }
+
+    /// Penalize the validator of the given ``index``.
+    ///
+    /// Exits the validator and assigns its effective balance to the block producer for this
+    /// state.
+    ///
+    /// Spec v0.2.0
+    pub fn penalize_validator(
+        &mut self,
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<(), CommitteesError> {
+        self.exit_validator(validator_index, spec);
+        let current_epoch = self.current_epoch(spec);
+
+        self.latest_penalized_balances
+            [current_epoch.as_usize() % spec.latest_penalized_exit_length] +=
+            self.get_effective_balance(validator_index, spec);
+
+        let whistleblower_index = self.get_beacon_proposer_index(self.slot, spec)?;
+        let whistleblower_reward = self.get_effective_balance(validator_index, spec);
+        safe_add_assign!(
+            self.validator_balances[whistleblower_index as usize],
+            whistleblower_reward
+        );
+        safe_sub_assign!(
+            self.validator_balances[validator_index],
+            whistleblower_reward
+        );
+        self.validator_registry[validator_index].penalized_epoch = current_epoch;
+        Ok(())
+    }
+
+    /// Prepare the validator of the given `index` for withdrawal.
+    ///
+    /// Spec v0.2.0
+    pub fn prepare_validator_for_withdrawal(&mut self, validator_index: usize) {
+        // TODO: we're not ANDing here, we're setting. Potentially wrong.
+        self.validator_registry[validator_index].status_flags = Some(StatusFlags::Withdrawable);
+    }
+
+    /// Iterate through the validator registry and eject active validators with balance below
+    /// ``EJECTION_BALANCE``.
+    ///
+    /// Spec v0.2.0
+    pub fn process_ejections(&mut self, spec: &ChainSpec) {
+        for validator_index in
+            get_active_validator_indices(&self.validator_registry, self.current_epoch(spec))
+        {
+            if self.validator_balances[validator_index] < spec.ejection_balance {
+                self.exit_validator(validator_index, spec)
+            }
+        }
+    }
+
+    /// Returns the penalty that should be applied to some validator for inactivity.
+    ///
+    /// Note: this is defined "inline" in the spec, not as a helper function.
+    ///
+    /// Spec v0.2.0
+    pub fn inactivity_penalty(
+        &self,
+        validator_index: usize,
+        epochs_since_finality: Epoch,
+        base_reward_quotient: u64,
+        spec: &ChainSpec,
+    ) -> u64 {
+        let effective_balance = self.get_effective_balance(validator_index, spec);
+        self.base_reward(validator_index, base_reward_quotient, spec)
+            + effective_balance * epochs_since_finality.as_u64()
+                / spec.inactivity_penalty_quotient
+                / 2
+    }
+
+    /// Returns the distance between the first included attestation for some validator and this
+    /// slot.
+    ///
+    /// Note: In the spec this is defined "inline", not as a helper function.
+    ///
+    /// Spec v0.2.0
+    pub fn inclusion_distance(
+        &self,
+        attestations: &[&PendingAttestation],
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<u64, InclusionError> {
+        let attestation =
+            self.earliest_included_attestation(attestations, validator_index, spec)?;
+        Ok((attestation.inclusion_slot - attestation.data.slot).as_u64())
+    }
+
+    /// Returns the slot of the earliest included attestation for some validator.
+    ///
+    /// Note: In the spec this is defined "inline", not as a helper function.
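+    ///
+    /// Illustrative example: if a validator's earliest attestation for slot 100
+    /// was first included in a block at slot 103, `inclusion_slot` is 103 and
+    /// `inclusion_distance` (above) is 3.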
+    ///
+    /// Spec v0.2.0
+    pub fn inclusion_slot(
+        &self,
+        attestations: &[&PendingAttestation],
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<Slot, InclusionError> {
+        let attestation =
+            self.earliest_included_attestation(attestations, validator_index, spec)?;
+        Ok(attestation.inclusion_slot)
+    }
+
+    /// Finds the earliest included attestation for some validator.
+    ///
+    /// Note: In the spec this is defined "inline", not as a helper function.
+    ///
+    /// Spec v0.2.0
+    fn earliest_included_attestation(
+        &self,
+        attestations: &[&PendingAttestation],
+        validator_index: usize,
+        spec: &ChainSpec,
+    ) -> Result<PendingAttestation, InclusionError> {
+        let mut included_attestations = vec![];
+
+        for (i, a) in attestations.iter().enumerate() {
+            let participants =
+                self.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
+            if participants.iter().any(|i| *i == validator_index) {
+                included_attestations.push(i);
+            }
+        }
+
+        let earliest_attestation_index = included_attestations
+            .iter()
+            .min_by_key(|i| attestations[**i].inclusion_slot)
+            .ok_or_else(|| InclusionError::NoIncludedAttestations)?;
+        Ok(attestations[*earliest_attestation_index].clone())
+    }
+
+    /// Returns the base reward for some validator.
+    ///
+    /// Note: In the spec this is defined "inline", not as a helper function.
+    ///
+    /// Spec v0.2.0
+    pub fn base_reward(
+        &self,
+        validator_index: usize,
+        base_reward_quotient: u64,
+        spec: &ChainSpec,
+    ) -> u64 {
+        self.get_effective_balance(validator_index, spec) / base_reward_quotient / 5
+    }
+
+    /// Return the combined effective balance of an array of validators.
+    ///
+    /// Spec v0.2.0
+    pub fn get_total_balance(&self, validator_indices: &[usize], spec: &ChainSpec) -> u64 {
+        validator_indices
+            .iter()
+            .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec))
+    }
+
+    /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``.
+    ///
+    /// Spec v0.2.0
+    pub fn get_effective_balance(&self, validator_index: usize, spec: &ChainSpec) -> u64 {
+        std::cmp::min(
+            self.validator_balances[validator_index],
+            spec.max_deposit_amount,
+        )
+    }
+
+    /// Return the block root at a recent `slot`.
+    ///
+    /// Spec v0.2.0
+    pub fn get_block_root(&self, slot: Slot, spec: &ChainSpec) -> Option<&Hash256> {
+        self.latest_block_roots
+            .get(slot.as_usize() % spec.latest_block_roots_length)
+    }
+
+    pub fn get_attestation_participants_union(
+        &self,
+        attestations: &[&PendingAttestation],
+        spec: &ChainSpec,
+    ) -> Result<Vec<usize>, AttestationParticipantsError> {
+        let mut all_participants = attestations.iter().try_fold::<_, _, Result<
+            Vec<usize>,
+            AttestationParticipantsError,
+        >>(vec![], |mut acc, a| {
+            acc.append(&mut self.get_attestation_participants(
+                &a.data,
+                &a.aggregation_bitfield,
+                spec,
+            )?);
+            Ok(acc)
+        })?;
+        all_participants.sort_unstable();
+        all_participants.dedup();
+        Ok(all_participants)
+    }
+
+    /// Return the participant indices for the ``attestation_data`` and ``bitfield``.
+    ///
+    /// In effect, this converts the "committee indices" in the bitfield into "validator indices"
+    /// for `self.validator_registry`.
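+    ///
+    /// Illustrative example: for a crosslink committee `[13, 55, 8, 21]` and a
+    /// bitfield with bits 0 and 2 set, the participants are validators 13 and 8.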
+    ///
+    /// Spec v0.2.0
+    pub fn get_attestation_participants(
+        &self,
+        attestation_data: &AttestationData,
+        bitfield: &Bitfield,
+        spec: &ChainSpec,
+    ) -> Result<Vec<usize>, AttestationParticipantsError> {
+        let crosslink_committees =
+            self.get_crosslink_committees_at_slot(attestation_data.slot, false, spec)?;
+
+        let committee_index: usize = crosslink_committees
+            .iter()
+            .position(|(_committee, shard)| *shard == attestation_data.shard)
+            .ok_or_else(|| AttestationParticipantsError::NoCommitteeForShard)?;
+        let (crosslink_committee, _shard) = &crosslink_committees[committee_index];
+
+        /*
+         * TODO: verify bitfield length is valid.
+         */
+
+        let mut participants = vec![];
+        for (i, validator_index) in crosslink_committee.iter().enumerate() {
+            if bitfield.get(i).unwrap() {
+                participants.push(*validator_index);
+            }
+        }
+        Ok(participants)
+    }
+}
+
+fn hash_tree_root(input: Vec<usize>) -> Hash256 {
+    Hash256::from(&input.hash_tree_root()[..])
+}
+
+impl From<AttestationParticipantsError> for AttestationValidationError {
+    fn from(e: AttestationParticipantsError) -> AttestationValidationError {
+        AttestationValidationError::AttestationParticipantsError(e)
+    }
+}
+
+impl From<CommitteesError> for AttestationParticipantsError {
+    fn from(e: CommitteesError) -> AttestationParticipantsError {
+        AttestationParticipantsError::CommitteesError(e)
+    }
+}
+
+impl From<AttestationParticipantsError> for InclusionError {
+    fn from(e: AttestationParticipantsError) -> InclusionError {
+        InclusionError::AttestationParticipantsError(e)
+    }
+}
+
+impl From<CommitteesError> for Error {
+    fn from(e: CommitteesError) -> Error {
+        Error::CommitteesError(e)
+    }
+}
+
+impl Encodable for BeaconState {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.slot);
+        s.append(&self.genesis_time);
+        s.append(&self.fork);
+        s.append(&self.validator_registry);
+        s.append(&self.validator_balances);
+        s.append(&self.validator_registry_update_epoch);
+        s.append(&self.latest_randao_mixes);
+        s.append(&self.previous_epoch_start_shard);
+        s.append(&self.current_epoch_start_shard);
+        s.append(&self.previous_calculation_epoch);
+        s.append(&self.current_calculation_epoch);
+        s.append(&self.previous_epoch_seed);
+        s.append(&self.current_epoch_seed);
+        s.append(&self.previous_justified_epoch);
+        s.append(&self.justified_epoch);
+        s.append(&self.justification_bitfield);
+        s.append(&self.finalized_epoch);
+        s.append(&self.latest_crosslinks);
+        s.append(&self.latest_block_roots);
+        s.append(&self.latest_index_roots);
+        s.append(&self.latest_penalized_balances);
+        s.append(&self.latest_attestations);
+        s.append(&self.batched_block_roots);
+        s.append(&self.latest_eth1_data);
+        s.append(&self.eth1_data_votes);
+    }
+}
+
+impl Decodable for BeaconState {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (slot, i) = <_>::ssz_decode(bytes, i)?;
+        let (genesis_time, i) = <_>::ssz_decode(bytes, i)?;
+        let (fork, i) = <_>::ssz_decode(bytes, i)?;
+        let (validator_registry, i) = <_>::ssz_decode(bytes, i)?;
+        let (validator_balances, i) = <_>::ssz_decode(bytes, i)?;
+        let (validator_registry_update_epoch, i) = <_>::ssz_decode(bytes, i)?;
+        let (latest_randao_mixes, i) = <_>::ssz_decode(bytes, i)?;
+        let (previous_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
+        let (current_epoch_start_shard, i) = <_>::ssz_decode(bytes, i)?;
+        let (previous_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
+        let (current_calculation_epoch, i) = <_>::ssz_decode(bytes, i)?;
+        let (previous_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
+        let (current_epoch_seed, i) = <_>::ssz_decode(bytes, i)?;
+        let
(previous_justified_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (justification_bitfield, i) = <_>::ssz_decode(bytes, i)?; + let (finalized_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?; + let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_index_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_penalized_balances, i) = <_>::ssz_decode(bytes, i)?; + let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?; + let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?; + let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?; + let (eth1_data_votes, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + slot, + genesis_time, + fork, + validator_registry, + validator_balances, + validator_registry_update_epoch, + latest_randao_mixes, + previous_epoch_start_shard, + current_epoch_start_shard, + previous_calculation_epoch, + current_calculation_epoch, + previous_epoch_seed, + current_epoch_seed, + previous_justified_epoch, + justified_epoch, + justification_bitfield, + finalized_epoch, + latest_crosslinks, + latest_block_roots, + latest_index_roots, + latest_penalized_balances, + latest_attestations, + batched_block_roots, + latest_eth1_data, + eth1_data_votes, + }, + i, + )) + } +} + +impl TreeHash for BeaconState { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.slot.hash_tree_root()); + result.append(&mut self.genesis_time.hash_tree_root()); + result.append(&mut self.fork.hash_tree_root()); + result.append(&mut self.validator_registry.hash_tree_root()); + result.append(&mut self.validator_balances.hash_tree_root()); + result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); + result.append(&mut self.latest_randao_mixes.hash_tree_root()); + result.append(&mut self.previous_epoch_start_shard.hash_tree_root()); + result.append(&mut self.current_epoch_start_shard.hash_tree_root()); + result.append(&mut self.previous_calculation_epoch.hash_tree_root()); + result.append(&mut self.current_calculation_epoch.hash_tree_root()); + result.append(&mut self.previous_epoch_seed.hash_tree_root()); + result.append(&mut self.current_epoch_seed.hash_tree_root()); + result.append(&mut self.previous_justified_epoch.hash_tree_root()); + result.append(&mut self.justified_epoch.hash_tree_root()); + result.append(&mut self.justification_bitfield.hash_tree_root()); + result.append(&mut self.finalized_epoch.hash_tree_root()); + result.append(&mut self.latest_crosslinks.hash_tree_root()); + result.append(&mut self.latest_block_roots.hash_tree_root()); + result.append(&mut self.latest_index_roots.hash_tree_root()); + result.append(&mut self.latest_penalized_balances.hash_tree_root()); + result.append(&mut self.latest_attestations.hash_tree_root()); + result.append(&mut self.batched_block_roots.hash_tree_root()); + result.append(&mut self.latest_eth1_data.hash_tree_root()); + result.append(&mut self.eth1_data_votes.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for BeaconState { + fn random_for_test(rng: &mut T) -> Self { + Self { + slot: <_>::random_for_test(rng), + genesis_time: <_>::random_for_test(rng), + fork: <_>::random_for_test(rng), + validator_registry: <_>::random_for_test(rng), + validator_balances: <_>::random_for_test(rng), + validator_registry_update_epoch: <_>::random_for_test(rng), + latest_randao_mixes: <_>::random_for_test(rng), + previous_epoch_start_shard: <_>::random_for_test(rng), + 
current_epoch_start_shard: <_>::random_for_test(rng), + previous_calculation_epoch: <_>::random_for_test(rng), + current_calculation_epoch: <_>::random_for_test(rng), + previous_epoch_seed: <_>::random_for_test(rng), + current_epoch_seed: <_>::random_for_test(rng), + previous_justified_epoch: <_>::random_for_test(rng), + justified_epoch: <_>::random_for_test(rng), + justification_bitfield: <_>::random_for_test(rng), + finalized_epoch: <_>::random_for_test(rng), + latest_crosslinks: <_>::random_for_test(rng), + latest_block_roots: <_>::random_for_test(rng), + latest_index_roots: <_>::random_for_test(rng), + latest_penalized_balances: <_>::random_for_test(rng), + latest_attestations: <_>::random_for_test(rng), + batched_block_roots: <_>::random_for_test(rng), + latest_eth1_data: <_>::random_for_test(rng), + eth1_data_votes: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = BeaconState::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = BeaconState::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/casper_slashing.rs b/eth2/types/src/casper_slashing.rs new file mode 100644 index 000000000..0eab069b4 --- /dev/null +++ b/eth2/types/src/casper_slashing.rs @@ -0,0 +1,81 @@ +use super::SlashableVoteData; +use crate::test_utils::TestRandom; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct CasperSlashing { + pub slashable_vote_data_1: SlashableVoteData, + pub slashable_vote_data_2: SlashableVoteData, +} + +impl Encodable for CasperSlashing { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.slashable_vote_data_1); + s.append(&self.slashable_vote_data_2); + } +} + +impl Decodable for CasperSlashing { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (slashable_vote_data_1, i) = <_>::ssz_decode(bytes, i)?; + let (slashable_vote_data_2, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + CasperSlashing { + slashable_vote_data_1, + slashable_vote_data_2, + }, + i, + )) + } +} + +impl TreeHash for CasperSlashing { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.slashable_vote_data_1.hash_tree_root()); + result.append(&mut self.slashable_vote_data_2.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for CasperSlashing { + fn random_for_test(rng: &mut T) -> Self { + Self { + slashable_vote_data_1: <_>::random_for_test(rng), + slashable_vote_data_2: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = CasperSlashing::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + 
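+        // Round-trip invariant: decoding an encoding must reproduce the original
+        // value exactly; the ignored second element is the next read offset.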
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = CasperSlashing::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs
new file mode 100644
index 000000000..3cb857ef4
--- /dev/null
+++ b/eth2/types/src/crosslink.rs
@@ -0,0 +1,91 @@
+use crate::test_utils::TestRandom;
+use crate::{Epoch, Hash256};
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Hash)]
+pub struct Crosslink {
+    pub epoch: Epoch,
+    pub shard_block_root: Hash256,
+}
+
+impl Crosslink {
+    /// Generates a new instance where `epoch` and `shard_block_root` are both zero.
+    pub fn zero() -> Self {
+        Self {
+            epoch: Epoch::new(0),
+            shard_block_root: Hash256::zero(),
+        }
+    }
+}
+
+impl Encodable for Crosslink {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.epoch);
+        s.append(&self.shard_block_root);
+    }
+}
+
+impl Decodable for Crosslink {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (epoch, i) = <_>::ssz_decode(bytes, i)?;
+        let (shard_block_root, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            Self {
+                epoch,
+                shard_block_root,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for Crosslink {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.epoch.hash_tree_root());
+        result.append(&mut self.shard_block_root.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for Crosslink {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            epoch: <_>::random_for_test(rng),
+            shard_block_root: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Crosslink::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Crosslink::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs
new file mode 100644
index 000000000..62349cbc1
--- /dev/null
+++ b/eth2/types/src/deposit.rs
@@ -0,0 +1,87 @@
+use super::{DepositData, Hash256};
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct Deposit {
+    pub branch: Vec<Hash256>,
+    pub index: u64,
+    pub deposit_data: DepositData,
+}
+
+impl Encodable for Deposit {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append_vec(&self.branch);
+        s.append(&self.index);
+        s.append(&self.deposit_data);
+    }
+}
+
+impl Decodable for Deposit {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (branch, i) = <_>::ssz_decode(bytes, i)?;
+        let (index, i) = <_>::ssz_decode(bytes,
i)?; + let (deposit_data, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + branch, + index, + deposit_data, + }, + i, + )) + } +} + +impl TreeHash for Deposit { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.branch.hash_tree_root()); + result.append(&mut self.index.hash_tree_root()); + result.append(&mut self.deposit_data.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for Deposit { + fn random_for_test(rng: &mut T) -> Self { + Self { + branch: <_>::random_for_test(rng), + index: <_>::random_for_test(rng), + deposit_data: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Deposit::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Deposit::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs new file mode 100644 index 000000000..5c8c302f4 --- /dev/null +++ b/eth2/types/src/deposit_data.rs @@ -0,0 +1,87 @@ +use super::DepositInput; +use crate::test_utils::TestRandom; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct DepositData { + pub amount: u64, + pub timestamp: u64, + pub deposit_input: DepositInput, +} + +impl Encodable for DepositData { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.amount); + s.append(&self.timestamp); + s.append(&self.deposit_input); + } +} + +impl Decodable for DepositData { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (amount, i) = <_>::ssz_decode(bytes, i)?; + let (timestamp, i) = <_>::ssz_decode(bytes, i)?; + let (deposit_input, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + amount, + timestamp, + deposit_input, + }, + i, + )) + } +} + +impl TreeHash for DepositData { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.amount.hash_tree_root()); + result.append(&mut self.timestamp.hash_tree_root()); + result.append(&mut self.deposit_input.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for DepositData { + fn random_for_test(rng: &mut T) -> Self { + Self { + amount: <_>::random_for_test(rng), + timestamp: <_>::random_for_test(rng), + deposit_input: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = DepositData::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = DepositData::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + 
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs
new file mode 100644
index 000000000..fc53baae9
--- /dev/null
+++ b/eth2/types/src/deposit_input.rs
@@ -0,0 +1,88 @@
+use super::Hash256;
+use crate::test_utils::TestRandom;
+use bls::{PublicKey, Signature};
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+#[derive(Debug, PartialEq, Clone, Serialize)]
+pub struct DepositInput {
+    pub pubkey: PublicKey,
+    pub withdrawal_credentials: Hash256,
+    pub proof_of_possession: Signature,
+}
+
+impl Encodable for DepositInput {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.pubkey);
+        s.append(&self.withdrawal_credentials);
+        s.append(&self.proof_of_possession);
+    }
+}
+
+impl Decodable for DepositInput {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (pubkey, i) = <_>::ssz_decode(bytes, i)?;
+        let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?;
+        let (proof_of_possession, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            Self {
+                pubkey,
+                withdrawal_credentials,
+                proof_of_possession,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for DepositInput {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.pubkey.hash_tree_root());
+        result.append(&mut self.withdrawal_credentials.hash_tree_root());
+        result.append(&mut self.proof_of_possession.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for DepositInput {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            pubkey: <_>::random_for_test(rng),
+            withdrawal_credentials: <_>::random_for_test(rng),
+            proof_of_possession: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = DepositInput::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = DepositInput::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs
new file mode 100644
index 000000000..6e9bb7d26
--- /dev/null
+++ b/eth2/types/src/eth1_data.rs
@@ -0,0 +1,82 @@
+use super::Hash256;
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+// Note: this is referred to as `DepositRootVote` in the spec.
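+// Sketch of intent (assumed semantics, per the eth2 spec rather than this PR):
+// `deposit_root` is the root of the eth1 deposit trie being voted on, and
+// `block_hash` is the hash of the eth1 block it was observed in.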
+#[derive(Debug, PartialEq, Clone, Default, Serialize)]
+pub struct Eth1Data {
+    pub deposit_root: Hash256,
+    pub block_hash: Hash256,
+}
+
+impl Encodable for Eth1Data {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.deposit_root);
+        s.append(&self.block_hash);
+    }
+}
+
+impl Decodable for Eth1Data {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (deposit_root, i) = <_>::ssz_decode(bytes, i)?;
+        let (block_hash, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            Self {
+                deposit_root,
+                block_hash,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for Eth1Data {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.deposit_root.hash_tree_root());
+        result.append(&mut self.block_hash.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for Eth1Data {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            deposit_root: <_>::random_for_test(rng),
+            block_hash: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Eth1Data::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Eth1Data::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs
new file mode 100644
index 000000000..2bfee4d02
--- /dev/null
+++ b/eth2/types/src/eth1_data_vote.rs
@@ -0,0 +1,82 @@
+use super::Eth1Data;
+use crate::test_utils::TestRandom;
+use rand::RngCore;
+use serde_derive::Serialize;
+use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash};
+
+// Note: this is referred to as `DepositRootVote` in the spec.
+#[derive(Debug, PartialEq, Clone, Default, Serialize)]
+pub struct Eth1DataVote {
+    pub eth1_data: Eth1Data,
+    pub vote_count: u64,
+}
+
+impl Encodable for Eth1DataVote {
+    fn ssz_append(&self, s: &mut SszStream) {
+        s.append(&self.eth1_data);
+        s.append(&self.vote_count);
+    }
+}
+
+impl Decodable for Eth1DataVote {
+    fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
+        let (eth1_data, i) = <_>::ssz_decode(bytes, i)?;
+        let (vote_count, i) = <_>::ssz_decode(bytes, i)?;
+
+        Ok((
+            Self {
+                eth1_data,
+                vote_count,
+            },
+            i,
+        ))
+    }
+}
+
+impl TreeHash for Eth1DataVote {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut result: Vec<u8> = vec![];
+        result.append(&mut self.eth1_data.hash_tree_root());
+        result.append(&mut self.vote_count.hash_tree_root());
+        hash(&result)
+    }
+}
+
+impl<T: RngCore> TestRandom<T> for Eth1DataVote {
+    fn random_for_test(rng: &mut T) -> Self {
+        Self {
+            eth1_data: <_>::random_for_test(rng),
+            vote_count: <_>::random_for_test(rng),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+    use ssz::ssz_encode;
+
+    #[test]
+    pub fn test_ssz_round_trip() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Eth1DataVote::random_for_test(&mut rng);
+
+        let bytes = ssz_encode(&original);
+        let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
+
+        assert_eq!(original, decoded);
+    }
+
+    #[test]
+    pub fn test_hash_tree_root() {
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let original = Eth1DataVote::random_for_test(&mut rng);
+
+        let result = original.hash_tree_root();
+
+        assert_eq!(result.len(), 32);
+        // TODO: Add further tests
+        // https://github.com/sigp/lighthouse/issues/170
+    }
+}
diff --git a/eth2/types/src/exit.rs b/eth2/types/src/exit.rs
new file mode 100644
index 000000000..cd7746919
--- /dev/null
+++ b/eth2/types/src/exit.rs
@@ -0,0 +1,87 @@
+use
crate::{test_utils::TestRandom, Epoch}; +use bls::Signature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct Exit { + pub epoch: Epoch, + pub validator_index: u64, + pub signature: Signature, +} + +impl Encodable for Exit { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.epoch); + s.append(&self.validator_index); + s.append(&self.signature); + } +} + +impl Decodable for Exit { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (epoch, i) = <_>::ssz_decode(bytes, i)?; + let (validator_index, i) = <_>::ssz_decode(bytes, i)?; + let (signature, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + epoch, + validator_index, + signature, + }, + i, + )) + } +} + +impl TreeHash for Exit { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.epoch.hash_tree_root()); + result.append(&mut self.validator_index.hash_tree_root()); + result.append(&mut self.signature.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for Exit { + fn random_for_test(rng: &mut T) -> Self { + Self { + epoch: <_>::random_for_test(rng), + validator_index: <_>::random_for_test(rng), + signature: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Exit::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Exit::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs new file mode 100644 index 000000000..1c96a34ac --- /dev/null +++ b/eth2/types/src/fork.rs @@ -0,0 +1,86 @@ +use crate::{test_utils::TestRandom, Epoch}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Fork { + pub previous_version: u64, + pub current_version: u64, + pub epoch: Epoch, +} + +impl Encodable for Fork { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.previous_version); + s.append(&self.current_version); + s.append(&self.epoch); + } +} + +impl Decodable for Fork { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (previous_version, i) = <_>::ssz_decode(bytes, i)?; + let (current_version, i) = <_>::ssz_decode(bytes, i)?; + let (epoch, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + previous_version, + current_version, + epoch, + }, + i, + )) + } +} + +impl TreeHash for Fork { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.previous_version.hash_tree_root()); + result.append(&mut self.current_version.hash_tree_root()); + result.append(&mut self.epoch.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for Fork { + fn random_for_test(rng: &mut T) -> Self { + Self { + previous_version: <_>::random_for_test(rng), + current_version: 
<_>::random_for_test(rng), + epoch: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Fork::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Fork::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/free_attestation.rs b/eth2/types/src/free_attestation.rs new file mode 100644 index 000000000..16d4f6728 --- /dev/null +++ b/eth2/types/src/free_attestation.rs @@ -0,0 +1,12 @@ +/// Note: this object does not actually exist in the spec. +/// +/// We use it for managing attestations that have not been aggregated. +use super::{AttestationData, Signature}; +use serde_derive::Serialize; + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct FreeAttestation { + pub data: AttestationData, + pub signature: Signature, + pub validator_index: u64, +} diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs new file mode 100644 index 000000000..ba88d43a1 --- /dev/null +++ b/eth2/types/src/lib.rs @@ -0,0 +1,75 @@ +pub mod test_utils; + +pub mod attestation; +pub mod attestation_data; +pub mod attestation_data_and_custody_bit; +pub mod attester_slashing; +pub mod beacon_block; +pub mod beacon_block_body; +pub mod beacon_state; +pub mod casper_slashing; +pub mod crosslink; +pub mod deposit; +pub mod deposit_data; +pub mod deposit_input; +pub mod eth1_data; +pub mod eth1_data_vote; +pub mod exit; +pub mod fork; +pub mod free_attestation; +pub mod pending_attestation; +pub mod proposal_signed_data; +pub mod proposer_slashing; +pub mod readers; +pub mod shard_reassignment_record; +pub mod slashable_attestation; +pub mod slashable_vote_data; +pub mod slot_epoch_height; +pub mod spec; +pub mod validator; +pub mod validator_registry; +pub mod validator_registry_delta_block; + +use ethereum_types::{H160, H256, U256}; +use std::collections::HashMap; + +pub use crate::attestation::Attestation; +pub use crate::attestation_data::AttestationData; +pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; +pub use crate::attester_slashing::AttesterSlashing; +pub use crate::beacon_block::BeaconBlock; +pub use crate::beacon_block_body::BeaconBlockBody; +pub use crate::beacon_state::BeaconState; +pub use crate::casper_slashing::CasperSlashing; +pub use crate::crosslink::Crosslink; +pub use crate::deposit::Deposit; +pub use crate::deposit_data::DepositData; +pub use crate::deposit_input::DepositInput; +pub use crate::eth1_data::Eth1Data; +pub use crate::eth1_data_vote::Eth1DataVote; +pub use crate::exit::Exit; +pub use crate::fork::Fork; +pub use crate::free_attestation::FreeAttestation; +pub use crate::pending_attestation::PendingAttestation; +pub use crate::proposal_signed_data::ProposalSignedData; +pub use crate::proposer_slashing::ProposerSlashing; +pub use crate::slashable_attestation::SlashableAttestation; +pub use crate::slashable_vote_data::SlashableVoteData; +pub use crate::slot_epoch_height::{Epoch, Slot}; +pub use crate::spec::ChainSpec; +pub use 
crate::validator::{StatusFlags as ValidatorStatusFlags, Validator}; +pub use crate::validator_registry_delta_block::ValidatorRegistryDeltaBlock; + +pub type Hash256 = H256; +pub type Address = H160; +pub type EthBalance = U256; +pub type Bitfield = boolean_bitfield::BooleanBitfield; +pub type BitfieldError = boolean_bitfield::Error; + +/// Maps a (slot, shard_id) to attestation_indices. +pub type AttesterMap = HashMap<(u64, u64), Vec>; + +/// Maps a slot to a block proposer. +pub type ProposerMap = HashMap; + +pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs new file mode 100644 index 000000000..25ec109d7 --- /dev/null +++ b/eth2/types/src/pending_attestation.rs @@ -0,0 +1,93 @@ +use crate::test_utils::TestRandom; +use crate::{AttestationData, Bitfield, Slot}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct PendingAttestation { + pub aggregation_bitfield: Bitfield, + pub data: AttestationData, + pub custody_bitfield: Bitfield, + pub inclusion_slot: Slot, +} + +impl Encodable for PendingAttestation { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.aggregation_bitfield); + s.append(&self.data); + s.append(&self.custody_bitfield); + s.append(&self.inclusion_slot); + } +} + +impl Decodable for PendingAttestation { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (aggregation_bitfield, i) = <_>::ssz_decode(bytes, i)?; + let (data, i) = <_>::ssz_decode(bytes, i)?; + let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?; + let (inclusion_slot, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + data, + aggregation_bitfield, + custody_bitfield, + inclusion_slot, + }, + i, + )) + } +} + +impl TreeHash for PendingAttestation { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.aggregation_bitfield.hash_tree_root()); + result.append(&mut self.data.hash_tree_root()); + result.append(&mut self.custody_bitfield.hash_tree_root()); + result.append(&mut self.inclusion_slot.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for PendingAttestation { + fn random_for_test(rng: &mut T) -> Self { + Self { + data: <_>::random_for_test(rng), + aggregation_bitfield: <_>::random_for_test(rng), + custody_bitfield: <_>::random_for_test(rng), + inclusion_slot: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = PendingAttestation::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = PendingAttestation::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/proposal_signed_data.rs b/eth2/types/src/proposal_signed_data.rs new file mode 100644 index 000000000..c57eb1e2a --- /dev/null +++ b/eth2/types/src/proposal_signed_data.rs @@ -0,0 +1,87 @@ +use 
crate::test_utils::TestRandom; +use crate::{Hash256, Slot}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Default, Serialize)] +pub struct ProposalSignedData { + pub slot: Slot, + pub shard: u64, + pub block_root: Hash256, +} + +impl Encodable for ProposalSignedData { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.slot); + s.append(&self.shard); + s.append(&self.block_root); + } +} + +impl Decodable for ProposalSignedData { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (slot, i) = <_>::ssz_decode(bytes, i)?; + let (shard, i) = <_>::ssz_decode(bytes, i)?; + let (block_root, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + ProposalSignedData { + slot, + shard, + block_root, + }, + i, + )) + } +} + +impl TreeHash for ProposalSignedData { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.slot.hash_tree_root()); + result.append(&mut self.shard.hash_tree_root()); + result.append(&mut self.block_root.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for ProposalSignedData { + fn random_for_test(rng: &mut T) -> Self { + Self { + slot: <_>::random_for_test(rng), + shard: <_>::random_for_test(rng), + block_root: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ProposalSignedData::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ProposalSignedData::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs new file mode 100644 index 000000000..417d23dbc --- /dev/null +++ b/eth2/types/src/proposer_slashing.rs @@ -0,0 +1,100 @@ +use super::ProposalSignedData; +use crate::test_utils::TestRandom; +use bls::Signature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ProposerSlashing { + pub proposer_index: u64, + pub proposal_data_1: ProposalSignedData, + pub proposal_signature_1: Signature, + pub proposal_data_2: ProposalSignedData, + pub proposal_signature_2: Signature, +} + +impl Encodable for ProposerSlashing { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.proposer_index); + s.append(&self.proposal_data_1); + s.append(&self.proposal_signature_1); + s.append(&self.proposal_data_2); + s.append(&self.proposal_signature_2); + } +} + +impl Decodable for ProposerSlashing { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (proposer_index, i) = <_>::ssz_decode(bytes, i)?; + let (proposal_data_1, i) = <_>::ssz_decode(bytes, i)?; + let (proposal_signature_1, i) = <_>::ssz_decode(bytes, i)?; + let (proposal_data_2, i) = <_>::ssz_decode(bytes, i)?; + let (proposal_signature_2, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + ProposerSlashing { + 
proposer_index, + proposal_data_1, + proposal_signature_1, + proposal_data_2, + proposal_signature_2, + }, + i, + )) + } +} + +impl TreeHash for ProposerSlashing { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.proposer_index.hash_tree_root()); + result.append(&mut self.proposal_data_1.hash_tree_root()); + result.append(&mut self.proposal_signature_1.hash_tree_root()); + result.append(&mut self.proposal_data_2.hash_tree_root()); + result.append(&mut self.proposal_signature_2.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for ProposerSlashing { + fn random_for_test(rng: &mut T) -> Self { + Self { + proposer_index: <_>::random_for_test(rng), + proposal_data_1: <_>::random_for_test(rng), + proposal_signature_1: <_>::random_for_test(rng), + proposal_data_2: <_>::random_for_test(rng), + proposal_signature_2: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ProposerSlashing::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ProposerSlashing::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/readers/block_reader.rs b/eth2/types/src/readers/block_reader.rs new file mode 100644 index 000000000..bcb2d0e63 --- /dev/null +++ b/eth2/types/src/readers/block_reader.rs @@ -0,0 +1,40 @@ +use crate::{BeaconBlock, Hash256, Slot}; +use std::fmt::Debug; + +/// The `BeaconBlockReader` provides interfaces for reading a subset of fields of a `BeaconBlock`. +/// +/// The purpose of this trait is to allow reading from either; +/// - a standard `BeaconBlock` struct, or +/// - a SSZ serialized byte array. +/// +/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for +/// "future proofing". 
+pub trait BeaconBlockReader: Debug + PartialEq { + fn slot(&self) -> Slot; + fn parent_root(&self) -> Hash256; + fn state_root(&self) -> Hash256; + fn canonical_root(&self) -> Hash256; + fn into_beacon_block(self) -> Option<BeaconBlock>; +} + +impl BeaconBlockReader for BeaconBlock { + fn slot(&self) -> Slot { + self.slot + } + + fn parent_root(&self) -> Hash256 { + self.parent_root + } + + fn state_root(&self) -> Hash256 { + self.state_root + } + + fn canonical_root(&self) -> Hash256 { + self.canonical_root() + } + + fn into_beacon_block(self) -> Option<BeaconBlock> { + Some(self) + } +} diff --git a/eth2/types/src/readers/mod.rs b/eth2/types/src/readers/mod.rs new file mode 100644 index 000000000..4ccb14a8c --- /dev/null +++ b/eth2/types/src/readers/mod.rs @@ -0,0 +1,5 @@ +mod block_reader; +mod state_reader; + +pub use self::block_reader::BeaconBlockReader; +pub use self::state_reader::BeaconStateReader; diff --git a/eth2/types/src/readers/state_reader.rs b/eth2/types/src/readers/state_reader.rs new file mode 100644 index 000000000..92a870855 --- /dev/null +++ b/eth2/types/src/readers/state_reader.rs @@ -0,0 +1,30 @@ +use crate::{BeaconState, Hash256, Slot}; +use std::fmt::Debug; + +/// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`. +/// +/// The purpose of this trait is to allow reading from either; +/// - a standard `BeaconState` struct, or +/// - a SSZ serialized byte array. +/// +/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for +/// "future proofing". +pub trait BeaconStateReader: Debug + PartialEq { + fn slot(&self) -> Slot; + fn canonical_root(&self) -> Hash256; + fn into_beacon_state(self) -> Option<BeaconState>; +} + +impl BeaconStateReader for BeaconState { + fn slot(&self) -> Slot { + self.slot + } + + fn canonical_root(&self) -> Hash256 { + self.canonical_root() + } + + fn into_beacon_state(self) -> Option<BeaconState> { + Some(self) + } +} diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs new file mode 100644 index 000000000..61f68ac05 --- /dev/null +++ b/eth2/types/src/shard_reassignment_record.rs @@ -0,0 +1,86 @@ +use crate::{test_utils::TestRandom, Slot}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct ShardReassignmentRecord { + pub validator_index: u64, + pub shard: u64, + pub slot: Slot, +} + +impl Encodable for ShardReassignmentRecord { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.validator_index); + s.append(&self.shard); + s.append(&self.slot); + } +} + +impl Decodable for ShardReassignmentRecord { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (validator_index, i) = <_>::ssz_decode(bytes, i)?; + let (shard, i) = <_>::ssz_decode(bytes, i)?; + let (slot, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + validator_index, + shard, + slot, + }, + i, + )) + } +} + +impl TreeHash for ShardReassignmentRecord { + fn hash_tree_root(&self) -> Vec<u8> { + let mut result: Vec<u8> = vec![]; + result.append(&mut self.validator_index.hash_tree_root()); + result.append(&mut self.shard.hash_tree_root()); + result.append(&mut self.slot.hash_tree_root()); + hash(&result) + } +} + +impl<T: RngCore> TestRandom<T> for ShardReassignmentRecord { + fn random_for_test(rng: &mut T) -> Self { + Self { + validator_index: <_>::random_for_test(rng), + shard: <_>::random_for_test(rng), + slot: <_>::random_for_test(rng), + } + } +} +
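+// A minimal usage sketch of the SSZ round trip that every container in this diff
+// supports (illustrative only; it relies solely on the `ssz` APIs imported above).
+// `ssz_decode` yields the decoded value plus the index of the first unconsumed byte.
+#[allow(dead_code)]
+fn ssz_round_trip_sketch(record: ShardReassignmentRecord) -> ShardReassignmentRecord {
+    let bytes = ssz::ssz_encode(&record);
+    let (decoded, _next_byte) = ShardReassignmentRecord::ssz_decode(&bytes, 0).unwrap();
+    decoded
+}
+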
+#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ShardReassignmentRecord::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ShardReassignmentRecord::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs new file mode 100644 index 000000000..6d83ad147 --- /dev/null +++ b/eth2/types/src/slashable_attestation.rs @@ -0,0 +1,92 @@ +use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct SlashableAttestation { + pub validator_indices: Vec, + pub data: AttestationData, + pub custody_bitfield: Bitfield, + pub aggregate_signature: AggregateSignature, +} + +impl Encodable for SlashableAttestation { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.validator_indices); + s.append(&self.data); + s.append(&self.custody_bitfield); + s.append(&self.aggregate_signature); + } +} + +impl Decodable for SlashableAttestation { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (validator_indices, i) = <_>::ssz_decode(bytes, i)?; + let (data, i) = <_>::ssz_decode(bytes, i)?; + let (custody_bitfield, i) = <_>::ssz_decode(bytes, i)?; + let (aggregate_signature, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + SlashableAttestation { + validator_indices, + data, + custody_bitfield, + aggregate_signature, + }, + i, + )) + } +} + +impl TreeHash for SlashableAttestation { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.validator_indices.hash_tree_root()); + result.append(&mut self.data.hash_tree_root()); + result.append(&mut self.custody_bitfield.hash_tree_root()); + result.append(&mut self.aggregate_signature.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for SlashableAttestation { + fn random_for_test(rng: &mut T) -> Self { + Self { + validator_indices: <_>::random_for_test(rng), + data: <_>::random_for_test(rng), + custody_bitfield: <_>::random_for_test(rng), + aggregate_signature: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = SlashableAttestation::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = SlashableAttestation::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff 
--git a/eth2/types/src/slashable_vote_data.rs b/eth2/types/src/slashable_vote_data.rs new file mode 100644 index 000000000..acffca26d --- /dev/null +++ b/eth2/types/src/slashable_vote_data.rs @@ -0,0 +1,94 @@ +use super::AttestationData; +use crate::test_utils::TestRandom; +use bls::AggregateSignature; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct SlashableVoteData { + pub custody_bit_0_indices: Vec, + pub custody_bit_1_indices: Vec, + pub data: AttestationData, + pub aggregate_signature: AggregateSignature, +} + +impl Encodable for SlashableVoteData { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.custody_bit_0_indices); + s.append_vec(&self.custody_bit_1_indices); + s.append(&self.data); + s.append(&self.aggregate_signature); + } +} + +impl Decodable for SlashableVoteData { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (custody_bit_0_indices, i) = <_>::ssz_decode(bytes, i)?; + let (custody_bit_1_indices, i) = <_>::ssz_decode(bytes, i)?; + let (data, i) = <_>::ssz_decode(bytes, i)?; + let (aggregate_signature, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + SlashableVoteData { + custody_bit_0_indices, + custody_bit_1_indices, + data, + aggregate_signature, + }, + i, + )) + } +} + +impl TreeHash for SlashableVoteData { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.custody_bit_0_indices.hash_tree_root()); + result.append(&mut self.custody_bit_1_indices.hash_tree_root()); + result.append(&mut self.data.hash_tree_root()); + result.append(&mut self.aggregate_signature.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for SlashableVoteData { + fn random_for_test(rng: &mut T) -> Self { + Self { + custody_bit_0_indices: <_>::random_for_test(rng), + custody_bit_1_indices: <_>::random_for_test(rng), + data: <_>::random_for_test(rng), + aggregate_signature: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = SlashableVoteData::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = SlashableVoteData::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/slot_epoch_height.rs b/eth2/types/src/slot_epoch_height.rs new file mode 100644 index 000000000..4f6b50b3a --- /dev/null +++ b/eth2/types/src/slot_epoch_height.rs @@ -0,0 +1,763 @@ +/// The `Slot` `Epoch`, `Height` types are defined as newtypes over u64 to enforce type-safety between +/// the three types. +/// +/// `Slot`, `Epoch` and `Height` have implementations which permit conversion, comparison and math operations +/// between each and `u64`, however specifically not between each other. +/// +/// All math operations on `Slot` and `Epoch` are saturating, they never wrap. 
+/// +/// It would be easy to define `PartialOrd` and other traits generically across all types which +/// implement `Into`, however this would allow operations between `Slots`, `Epochs` and +/// `Heights` which may lead to programming errors which are not detected by the compiler. +use crate::test_utils::TestRandom; +use rand::RngCore; +use serde_derive::Serialize; +use slog; +use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use std::cmp::{Ord, Ordering}; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::iter::Iterator; +use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; + +macro_rules! impl_from_into_u64 { + ($main: ident) => { + impl From for $main { + fn from(n: u64) -> $main { + $main(n) + } + } + + impl Into for $main { + fn into(self) -> u64 { + self.0 + } + } + + impl $main { + pub fn as_u64(&self) -> u64 { + self.0 + } + } + }; +} + +// need to truncate for some fork-choice algorithms +macro_rules! impl_into_u32 { + ($main: ident) => { + impl Into for $main { + fn into(self) -> u32 { + self.0 as u32 + } + } + + impl $main { + pub fn as_u32(&self) -> u32 { + self.0 as u32 + } + } + }; +} + +macro_rules! impl_from_into_usize { + ($main: ident) => { + impl From for $main { + fn from(n: usize) -> $main { + $main(n as u64) + } + } + + impl Into for $main { + fn into(self) -> usize { + self.0 as usize + } + } + + impl $main { + pub fn as_usize(&self) -> usize { + self.0 as usize + } + } + }; +} + +macro_rules! impl_math_between { + ($main: ident, $other: ident) => { + impl PartialOrd<$other> for $main { + /// Utilizes `partial_cmp` on the underlying `u64`. + fn partial_cmp(&self, other: &$other) -> Option { + Some(self.0.cmp(&(*other).into())) + } + } + + impl PartialEq<$other> for $main { + fn eq(&self, other: &$other) -> bool { + let other: u64 = (*other).into(); + self.0 == other + } + } + + impl Add<$other> for $main { + type Output = $main; + + fn add(self, other: $other) -> $main { + $main::from(self.0.saturating_add(other.into())) + } + } + + impl AddAssign<$other> for $main { + fn add_assign(&mut self, other: $other) { + self.0 = self.0.saturating_add(other.into()); + } + } + + impl Sub<$other> for $main { + type Output = $main; + + fn sub(self, other: $other) -> $main { + $main::from(self.0.saturating_sub(other.into())) + } + } + + impl SubAssign<$other> for $main { + fn sub_assign(&mut self, other: $other) { + self.0 = self.0.saturating_sub(other.into()); + } + } + + impl Mul<$other> for $main { + type Output = $main; + + fn mul(self, rhs: $other) -> $main { + let rhs: u64 = rhs.into(); + $main::from(self.0.saturating_mul(rhs)) + } + } + + impl MulAssign<$other> for $main { + fn mul_assign(&mut self, rhs: $other) { + let rhs: u64 = rhs.into(); + self.0 = self.0.saturating_mul(rhs) + } + } + + impl Div<$other> for $main { + type Output = $main; + + fn div(self, rhs: $other) -> $main { + let rhs: u64 = rhs.into(); + if rhs == 0 { + panic!("Cannot divide by zero-valued Slot/Epoch") + } + $main::from(self.0 / rhs) + } + } + + impl DivAssign<$other> for $main { + fn div_assign(&mut self, rhs: $other) { + let rhs: u64 = rhs.into(); + if rhs == 0 { + panic!("Cannot divide by zero-valued Slot/Epoch") + } + self.0 = self.0 / rhs + } + } + + impl Rem<$other> for $main { + type Output = $main; + + fn rem(self, modulus: $other) -> $main { + let modulus: u64 = modulus.into(); + $main::from(self.0 % modulus) + } + } + }; +} + +macro_rules! 
impl_math { + ($type: ident) => { + impl $type { + pub fn saturating_sub>(&self, other: T) -> $type { + *self - other.into() + } + + pub fn saturating_add>(&self, other: T) -> $type { + *self + other.into() + } + + pub fn checked_div>(&self, rhs: T) -> Option<$type> { + let rhs: $type = rhs.into(); + if rhs == 0 { + None + } else { + Some(*self / rhs) + } + } + + pub fn is_power_of_two(&self) -> bool { + self.0.is_power_of_two() + } + } + + impl Ord for $type { + fn cmp(&self, other: &$type) -> Ordering { + let other: u64 = (*other).into(); + self.0.cmp(&other) + } + } + }; +} + +macro_rules! impl_display { + ($type: ident) => { + impl fmt::Display for $type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } + } + + impl slog::Value for $type { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut slog::Serializer, + ) -> slog::Result { + self.0.serialize(record, key, serializer) + } + } + }; +} + +macro_rules! impl_ssz { + ($type: ident) => { + impl Encodable for $type { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.0); + } + } + + impl Decodable for $type { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (value, i) = <_>::ssz_decode(bytes, i)?; + + Ok(($type(value), i)) + } + } + + impl TreeHash for $type { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.0.hash_tree_root()); + hash(&result) + } + } + + impl TestRandom for $type { + fn random_for_test(rng: &mut T) -> Self { + $type::from(u64::random_for_test(rng)) + } + } + }; +} + +macro_rules! impl_hash { + ($type: ident) => { + // Implemented to stop clippy lint: + // https://rust-lang.github.io/rust-clippy/master/index.html#derive_hash_xor_eq + impl Hash for $type { + fn hash(&self, state: &mut H) { + ssz_encode(self).hash(state) + } + } + }; +} + +macro_rules! impl_common { + ($type: ident) => { + impl_from_into_u64!($type); + impl_from_into_usize!($type); + impl_math_between!($type, $type); + impl_math_between!($type, u64); + impl_math!($type); + impl_display!($type); + impl_ssz!($type); + impl_hash!($type); + }; +} + +/// Beacon block slot. +#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] +pub struct Slot(u64); + +/// Beacon block height, effectively `Slot/GENESIS_START_BLOCK`. +#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] +pub struct Height(u64); + +/// Beacon Epoch, effectively `Slot / EPOCH_LENGTH`. 
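+///
+/// A worked example of the conversions defined further below (illustrative): with
+/// `epoch_length = 64`, `Slot::new(130).epoch(64)` is `Epoch::new(2)` and
+/// `Epoch::new(2).start_slot(64)` is `Slot::new(128)`. Because the newtypes only
+/// implement math against themselves and `u64`, accidentally mixing a `Slot` into
+/// `Epoch` arithmetic is rejected at compile time.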
+#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] +pub struct Epoch(u64); + +impl_common!(Slot); +impl_common!(Height); +impl_into_u32!(Height); // height can be converted to u32 +impl_common!(Epoch); + +impl Slot { + pub fn new(slot: u64) -> Slot { + Slot(slot) + } + + pub fn epoch(self, epoch_length: u64) -> Epoch { + Epoch::from(self.0 / epoch_length) + } + + pub fn height(self, genesis_slot: Slot) -> Height { + Height::from(self.0.saturating_sub(genesis_slot.as_u64())) + } + + pub fn max_value() -> Slot { + Slot(u64::max_value()) + } +} + +impl Height { + pub fn new(slot: u64) -> Height { + Height(slot) + } + + pub fn slot(self, genesis_slot: Slot) -> Slot { + Slot::from(self.0.saturating_add(genesis_slot.as_u64())) + } + + pub fn epoch(self, genesis_slot: u64, epoch_length: u64) -> Epoch { + Epoch::from(self.0.saturating_add(genesis_slot) / epoch_length) + } + + pub fn max_value() -> Height { + Height(u64::max_value()) + } +} + +impl Epoch { + pub fn new(slot: u64) -> Epoch { + Epoch(slot) + } + + pub fn max_value() -> Epoch { + Epoch(u64::max_value()) + } + + pub fn start_slot(self, epoch_length: u64) -> Slot { + Slot::from(self.0.saturating_mul(epoch_length)) + } + + pub fn end_slot(self, epoch_length: u64) -> Slot { + Slot::from( + self.0 + .saturating_add(1) + .saturating_mul(epoch_length) + .saturating_sub(1), + ) + } + + pub fn slot_iter(&self, epoch_length: u64) -> SlotIter { + SlotIter { + current: self.start_slot(epoch_length), + epoch: self, + epoch_length, + } + } +} + +pub struct SlotIter<'a> { + current: Slot, + epoch: &'a Epoch, + epoch_length: u64, +} + +impl<'a> Iterator for SlotIter<'a> { + type Item = Slot; + + fn next(&mut self) -> Option { + if self.current == self.epoch.end_slot(self.epoch_length) { + None + } else { + let previous = self.current; + self.current += 1; + Some(previous) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules! new_tests { + ($type: ident) => { + #[test] + fn new() { + assert_eq!($type(0), $type::new(0)); + assert_eq!($type(3), $type::new(3)); + assert_eq!($type(u64::max_value()), $type::new(u64::max_value())); + } + }; + } + + macro_rules! from_into_tests { + ($type: ident, $other: ident) => { + #[test] + fn into() { + let x: $other = $type(0).into(); + assert_eq!(x, 0); + + let x: $other = $type(3).into(); + assert_eq!(x, 3); + + let x: $other = $type(u64::max_value()).into(); + // Note: this will fail on 32 bit systems. This is expected as we don't have a proper + // 32-bit system strategy in place. + assert_eq!(x, $other::max_value()); + } + + #[test] + fn from() { + assert_eq!($type(0), $type::from(0_u64)); + assert_eq!($type(3), $type::from(3_u64)); + assert_eq!($type(u64::max_value()), $type::from($other::max_value())); + } + }; + } + + macro_rules! 
math_between_tests { + ($type: ident, $other: ident) => { + #[test] + fn partial_ord() { + let assert_partial_ord = |a: u64, partial_ord: Ordering, b: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a).partial_cmp(&other), Some(partial_ord)); + }; + + assert_partial_ord(1, Ordering::Less, 2); + assert_partial_ord(2, Ordering::Greater, 1); + assert_partial_ord(0, Ordering::Less, u64::max_value()); + assert_partial_ord(u64::max_value(), Ordering::Greater, 0); + } + + #[test] + fn partial_eq() { + let assert_partial_eq = |a: u64, b: u64, is_equal: bool| { + let other: $other = $type(b).into(); + assert_eq!($type(a).eq(&other), is_equal); + }; + + assert_partial_eq(0, 0, true); + assert_partial_eq(0, 1, false); + assert_partial_eq(1, 0, false); + assert_partial_eq(1, 1, true); + + assert_partial_eq(u64::max_value(), u64::max_value(), true); + assert_partial_eq(0, u64::max_value(), false); + assert_partial_eq(u64::max_value(), 0, false); + } + + #[test] + fn add_and_add_assign() { + let assert_add = |a: u64, b: u64, result: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a) + other, $type(result)); + + let mut add_assigned = $type(a); + add_assigned += other; + + assert_eq!(add_assigned, $type(result)); + }; + + assert_add(0, 1, 1); + assert_add(1, 0, 1); + assert_add(1, 2, 3); + assert_add(2, 1, 3); + assert_add(7, 7, 14); + + // Addition should be saturating. + assert_add(u64::max_value(), 1, u64::max_value()); + assert_add(u64::max_value(), u64::max_value(), u64::max_value()); + } + + #[test] + fn sub_and_sub_assign() { + let assert_sub = |a: u64, b: u64, result: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a) - other, $type(result)); + + let mut sub_assigned = $type(a); + sub_assigned -= other; + + assert_eq!(sub_assigned, $type(result)); + }; + + assert_sub(1, 0, 1); + assert_sub(2, 1, 1); + assert_sub(14, 7, 7); + assert_sub(u64::max_value(), 1, u64::max_value() - 1); + assert_sub(u64::max_value(), u64::max_value(), 0); + + // Subtraction should be saturating + assert_sub(0, 1, 0); + assert_sub(1, 2, 0); + } + + #[test] + fn mul_and_mul_assign() { + let assert_mul = |a: u64, b: u64, result: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a) * other, $type(result)); + + let mut mul_assigned = $type(a); + mul_assigned *= other; + + assert_eq!(mul_assigned, $type(result)); + }; + + assert_mul(2, 2, 4); + assert_mul(1, 2, 2); + assert_mul(0, 2, 0); + + // Multiplication should be saturating. 
+ assert_mul(u64::max_value(), 2, u64::max_value()); + } + + #[test] + fn div_and_div_assign() { + let assert_div = |a: u64, b: u64, result: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a) / other, $type(result)); + + let mut div_assigned = $type(a); + div_assigned /= other; + + assert_eq!(div_assigned, $type(result)); + }; + + assert_div(0, 2, 0); + assert_div(2, 2, 1); + assert_div(100, 50, 2); + assert_div(128, 2, 64); + assert_div(u64::max_value(), 2, 2_u64.pow(63) - 1); + } + + #[test] + #[should_panic] + fn div_panics_with_divide_by_zero() { + let other: $other = $type(0).into(); + let _ = $type(2) / other; + } + + #[test] + #[should_panic] + fn div_assign_panics_with_divide_by_zero() { + let other: $other = $type(0).into(); + let mut assigned = $type(2); + assigned /= other; + } + + #[test] + fn rem() { + let assert_rem = |a: u64, b: u64, result: u64| { + let other: $other = $type(b).into(); + assert_eq!($type(a) % other, $type(result)); + }; + + assert_rem(3, 2, 1); + assert_rem(40, 2, 0); + assert_rem(10, 100, 10); + assert_rem(302042, 3293, 2379); + } + }; + } + + macro_rules! math_tests { + ($type: ident) => { + #[test] + fn saturating_sub() { + let assert_saturating_sub = |a: u64, b: u64, result: u64| { + assert_eq!($type(a).saturating_sub($type(b)), $type(result)); + }; + + assert_saturating_sub(1, 0, 1); + assert_saturating_sub(2, 1, 1); + assert_saturating_sub(14, 7, 7); + assert_saturating_sub(u64::max_value(), 1, u64::max_value() - 1); + assert_saturating_sub(u64::max_value(), u64::max_value(), 0); + + // Subtraction should be saturating + assert_saturating_sub(0, 1, 0); + assert_saturating_sub(1, 2, 0); + } + + #[test] + fn saturating_add() { + let assert_saturating_add = |a: u64, b: u64, result: u64| { + assert_eq!($type(a).saturating_add($type(b)), $type(result)); + }; + + assert_saturating_add(0, 1, 1); + assert_saturating_add(1, 0, 1); + assert_saturating_add(1, 2, 3); + assert_saturating_add(2, 1, 3); + assert_saturating_add(7, 7, 14); + + // Addition should be saturating. 
+ assert_saturating_add(u64::max_value(), 1, u64::max_value()); + assert_saturating_add(u64::max_value(), u64::max_value(), u64::max_value()); + } + + #[test] + fn checked_div() { + let assert_checked_div = |a: u64, b: u64, result: Option| { + let division_result_as_u64 = match $type(a).checked_div($type(b)) { + None => None, + Some(val) => Some(val.as_u64()), + }; + assert_eq!(division_result_as_u64, result); + }; + + assert_checked_div(0, 2, Some(0)); + assert_checked_div(2, 2, Some(1)); + assert_checked_div(100, 50, Some(2)); + assert_checked_div(128, 2, Some(64)); + assert_checked_div(u64::max_value(), 2, Some(2_u64.pow(63) - 1)); + + assert_checked_div(2, 0, None); + assert_checked_div(0, 0, None); + assert_checked_div(u64::max_value(), 0, None); + } + + #[test] + fn is_power_of_two() { + let assert_is_power_of_two = |a: u64, result: bool| { + assert_eq!( + $type(a).is_power_of_two(), + result, + "{}.is_power_of_two() != {}", + a, + result + ); + }; + + assert_is_power_of_two(0, false); + assert_is_power_of_two(1, true); + assert_is_power_of_two(2, true); + assert_is_power_of_two(3, false); + assert_is_power_of_two(4, true); + + assert_is_power_of_two(2_u64.pow(4), true); + assert_is_power_of_two(u64::max_value(), false); + } + + #[test] + fn ord() { + let assert_ord = |a: u64, ord: Ordering, b: u64| { + assert_eq!($type(a).cmp(&$type(b)), ord); + }; + + assert_ord(1, Ordering::Less, 2); + assert_ord(2, Ordering::Greater, 1); + assert_ord(0, Ordering::Less, u64::max_value()); + assert_ord(u64::max_value(), Ordering::Greater, 0); + } + }; + } + + macro_rules! ssz_tests { + ($type: ident) => { + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = $type::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = $type::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } + }; + } + + macro_rules! 
all_tests { + ($type: ident) => { + new_tests!($type); + math_between_tests!($type, $type); + math_tests!($type); + ssz_tests!($type); + + mod u64_tests { + use super::*; + + from_into_tests!($type, u64); + math_between_tests!($type, u64); + + #[test] + pub fn as_64() { + let x = $type(0).as_u64(); + assert_eq!(x, 0); + + let x = $type(3).as_u64(); + assert_eq!(x, 3); + + let x = $type(u64::max_value()).as_u64(); + assert_eq!(x, u64::max_value()); + } + } + + mod usize_tests { + use super::*; + + from_into_tests!($type, usize); + + #[test] + pub fn as_usize() { + let x = $type(0).as_usize(); + assert_eq!(x, 0); + + let x = $type(3).as_usize(); + assert_eq!(x, 3); + + let x = $type(u64::max_value()).as_usize(); + assert_eq!(x, usize::max_value()); + } + } + }; + } + + #[cfg(test)] + mod slot_tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + all_tests!(Slot); + } + + #[cfg(test)] + mod epoch_tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + all_tests!(Epoch); + } +} diff --git a/eth2/types/src/spec/foundation.rs b/eth2/types/src/spec/foundation.rs new file mode 100644 index 000000000..79abe4061 --- /dev/null +++ b/eth2/types/src/spec/foundation.rs @@ -0,0 +1,111 @@ +use crate::{Address, ChainSpec, Epoch, Hash256, Signature, Slot}; + +const GWEI: u64 = 1_000_000_000; + +impl ChainSpec { + /// Returns a `ChainSpec` compatible with the specification from Ethereum Foundation. + /// + /// Of course, the actual foundation specs are unknown at this point so these are just a rough + /// estimate. + /// + /// Spec v0.2.0 + pub fn foundation() -> Self { + let genesis_slot = Slot::new(2_u64.pow(19)); + let epoch_length = 64; + let genesis_epoch = genesis_slot.epoch(epoch_length); + + Self { + /* + * Misc + */ + shard_count: 1_024, + target_committee_size: 128, + max_balance_churn_quotient: 32, + beacon_chain_shard_number: u64::max_value(), + max_indices_per_slashable_vote: 4_096, + max_withdrawals_per_epoch: 4, + shuffle_round_count: 90, + + /* + * Deposit contract + */ + deposit_contract_address: Address::zero(), + deposit_contract_tree_depth: 32, + + /* + * Gwei values + */ + min_deposit_amount: u64::pow(2, 0) * GWEI, + max_deposit_amount: u64::pow(2, 5) * GWEI, + fork_choice_balance_increment: u64::pow(2, 0) * GWEI, + ejection_balance: u64::pow(2, 4) * GWEI, + + /* + * Initial Values + */ + genesis_fork_version: 0, + genesis_slot: Slot::new(2_u64.pow(19)), + genesis_epoch, + genesis_start_shard: 0, + far_future_epoch: Epoch::new(u64::max_value()), + zero_hash: Hash256::zero(), + empty_signature: Signature::empty_signature(), + bls_withdrawal_prefix_byte: 0, + + /* + * Time parameters + */ + slot_duration: 6, + min_attestation_inclusion_delay: 4, + epoch_length, + seed_lookahead: Epoch::new(1), + entry_exit_delay: 4, + eth1_data_voting_period: 16, + min_validator_withdrawal_epochs: Epoch::new(256), + + /* + * State list lengths + */ + latest_block_roots_length: 8_192, + latest_randao_mixes_length: 8_192, + latest_index_roots_length: 8_192, + latest_penalized_exit_length: 8_192, + + /* + * Reward and penalty quotients + */ + base_reward_quotient: 32, + whistleblower_reward_quotient: 512, + includer_reward_quotient: 8, + inactivity_penalty_quotient: 16_777_216, + + /* + * Max operations per block + */ + max_proposer_slashings: 16, + max_attester_slashings: 1, + max_attestations: 128, + max_deposits: 16, + max_exits: 16, + + /* + * Signature domains + */ + domain_deposit: 
0, + domain_attestation: 1, + domain_proposal: 2, + domain_exit: 3, + domain_randao: 4, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_foundation_spec_can_be_constructed() { + let _ = ChainSpec::foundation(); + } +} diff --git a/eth2/types/src/spec/mod.rs b/eth2/types/src/spec/mod.rs new file mode 100644 index 000000000..53c78a2c2 --- /dev/null +++ b/eth2/types/src/spec/mod.rs @@ -0,0 +1,92 @@ +mod foundation; + +use crate::{Address, Epoch, Hash256, Slot}; +use bls::Signature; + +/// Holds all the "constants" for a BeaconChain. +/// +/// Spec v0.2.0 +#[derive(PartialEq, Debug, Clone)] +pub struct ChainSpec { + /* + * Misc + */ + pub shard_count: u64, + pub target_committee_size: u64, + pub max_balance_churn_quotient: u64, + pub beacon_chain_shard_number: u64, + pub max_indices_per_slashable_vote: u64, + pub max_withdrawals_per_epoch: u64, + pub shuffle_round_count: u64, + + /* + * Deposit contract + */ + pub deposit_contract_address: Address, + pub deposit_contract_tree_depth: u64, + + /* + * Gwei values + */ + pub min_deposit_amount: u64, + pub max_deposit_amount: u64, + pub fork_choice_balance_increment: u64, + pub ejection_balance: u64, + + /* + * Initial Values + */ + pub genesis_fork_version: u64, + pub genesis_slot: Slot, + pub genesis_epoch: Epoch, + pub genesis_start_shard: u64, + pub far_future_epoch: Epoch, + pub zero_hash: Hash256, + pub empty_signature: Signature, + pub bls_withdrawal_prefix_byte: u8, + + /* + * Time parameters + */ + pub slot_duration: u64, + pub min_attestation_inclusion_delay: u64, + pub epoch_length: u64, + pub seed_lookahead: Epoch, + pub entry_exit_delay: u64, + pub eth1_data_voting_period: u64, + pub min_validator_withdrawal_epochs: Epoch, + + /* + * State list lengths + */ + pub latest_block_roots_length: usize, + pub latest_randao_mixes_length: usize, + pub latest_index_roots_length: usize, + pub latest_penalized_exit_length: usize, + + /* + * Reward and penalty quotients + */ + pub base_reward_quotient: u64, + pub whistleblower_reward_quotient: u64, + pub includer_reward_quotient: u64, + pub inactivity_penalty_quotient: u64, + + /* + * Max operations per block + */ + pub max_proposer_slashings: u64, + pub max_attester_slashings: u64, + pub max_attestations: u64, + pub max_deposits: u64, + pub max_exits: u64, + + /* + * Signature domains + */ + pub domain_deposit: u64, + pub domain_attestation: u64, + pub domain_proposal: u64, + pub domain_exit: u64, + pub domain_randao: u64, +} diff --git a/eth2/types/src/test_utils/address.rs b/eth2/types/src/test_utils/address.rs new file mode 100644 index 000000000..2d60b72da --- /dev/null +++ b/eth2/types/src/test_utils/address.rs @@ -0,0 +1,11 @@ +use super::TestRandom; +use crate::Address; +use rand::RngCore; + +impl TestRandom for Address { + fn random_for_test(rng: &mut T) -> Self { + let mut key_bytes = vec![0; 20]; + rng.fill_bytes(&mut key_bytes); + Address::from(&key_bytes[..]) + } +} diff --git a/eth2/types/src/test_utils/aggregate_signature.rs b/eth2/types/src/test_utils/aggregate_signature.rs new file mode 100644 index 000000000..6a15f7366 --- /dev/null +++ b/eth2/types/src/test_utils/aggregate_signature.rs @@ -0,0 +1,12 @@ +use super::TestRandom; +use bls::{AggregateSignature, Signature}; +use rand::RngCore; + +impl TestRandom for AggregateSignature { + fn random_for_test(rng: &mut T) -> Self { + let signature = Signature::random_for_test(rng); + let mut aggregate_signature = AggregateSignature::new(); + aggregate_signature.add(&signature); + aggregate_signature + } +} 
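As a quick orientation to how these constants interact, an illustrative sketch that only re-derives values already fixed by `ChainSpec::foundation()` above:

    let spec = ChainSpec::foundation();
    // genesis_epoch = genesis_slot / epoch_length = 2^19 / 64 = 8_192.
    assert_eq!(spec.genesis_epoch, spec.genesis_slot.epoch(spec.epoch_length));
    assert_eq!(spec.genesis_epoch, Epoch::new(8_192));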
diff --git a/eth2/types/src/test_utils/bitfield.rs b/eth2/types/src/test_utils/bitfield.rs new file mode 100644 index 000000000..15011edd9 --- /dev/null +++ b/eth2/types/src/test_utils/bitfield.rs @@ -0,0 +1,11 @@ +use super::super::Bitfield; +use super::TestRandom; +use rand::RngCore; + +impl<T: RngCore> TestRandom<T> for Bitfield { + fn random_for_test(rng: &mut T) -> Self { + let mut raw_bytes = vec![0; 32]; + rng.fill_bytes(&mut raw_bytes); + Bitfield::from_bytes(&raw_bytes) + } +} diff --git a/eth2/types/src/test_utils/hash256.rs b/eth2/types/src/test_utils/hash256.rs new file mode 100644 index 000000000..98f5e7899 --- /dev/null +++ b/eth2/types/src/test_utils/hash256.rs @@ -0,0 +1,11 @@ +use super::TestRandom; +use crate::Hash256; +use rand::RngCore; + +impl<T: RngCore> TestRandom<T> for Hash256 { + fn random_for_test(rng: &mut T) -> Self { + let mut key_bytes = vec![0; 32]; + rng.fill_bytes(&mut key_bytes); + Hash256::from(&key_bytes[..]) + } +} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs new file mode 100644 index 000000000..eb54f2a53 --- /dev/null +++ b/eth2/types/src/test_utils/mod.rs @@ -0,0 +1,49 @@ +use rand::RngCore; + +pub use rand::{prng::XorShiftRng, SeedableRng}; + +pub mod address; +pub mod aggregate_signature; +pub mod bitfield; +pub mod hash256; +pub mod public_key; +pub mod secret_key; +pub mod signature; + +pub trait TestRandom<T> +where + T: RngCore, +{ + fn random_for_test(rng: &mut T) -> Self; +} + +impl<T: RngCore> TestRandom<T> for u64 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u64() + } +} + +impl<T: RngCore> TestRandom<T> for u32 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() + } +} + +impl<T: RngCore> TestRandom<T> for usize { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() as usize + } +} + +impl<T: RngCore, U> TestRandom<T> for Vec<U> +where + U: TestRandom<T>, +{ + fn random_for_test(rng: &mut T) -> Self { + vec![ + <U>::random_for_test(rng), + <U>::random_for_test(rng), + <U>::random_for_test(rng), + ] + } +} diff --git a/eth2/types/src/test_utils/public_key.rs b/eth2/types/src/test_utils/public_key.rs new file mode 100644 index 000000000..bfccd3e53 --- /dev/null +++ b/eth2/types/src/test_utils/public_key.rs @@ -0,0 +1,10 @@ +use super::TestRandom; +use bls::{PublicKey, SecretKey}; +use rand::RngCore; + +impl<T: RngCore> TestRandom<T> for PublicKey { + fn random_for_test(rng: &mut T) -> Self { + let secret_key = SecretKey::random_for_test(rng); + PublicKey::from_secret_key(&secret_key) + } +} diff --git a/eth2/types/src/test_utils/secret_key.rs b/eth2/types/src/test_utils/secret_key.rs new file mode 100644 index 000000000..17481c3de --- /dev/null +++ b/eth2/types/src/test_utils/secret_key.rs @@ -0,0 +1,19 @@ +use super::TestRandom; +use bls::SecretKey; +use rand::RngCore; + +impl<T: RngCore> TestRandom<T> for SecretKey { + fn random_for_test(rng: &mut T) -> Self { + let mut key_bytes = vec![0; 48]; + rng.fill_bytes(&mut key_bytes); + /* + * An `unreachable!` is used here as there's no reason why you cannot construct a key from a + * fixed-length byte slice. Also, this should only be used during testing so a panic is + * acceptable.
+ */ + match SecretKey::from_bytes(&key_bytes) { + Ok(key) => key, + Err(_) => unreachable!(), + } + } +} diff --git a/eth2/types/src/test_utils/signature.rs b/eth2/types/src/test_utils/signature.rs new file mode 100644 index 000000000..9ec7aec60 --- /dev/null +++ b/eth2/types/src/test_utils/signature.rs @@ -0,0 +1,13 @@ +use super::TestRandom; +use bls::{SecretKey, Signature}; +use rand::RngCore; + +impl TestRandom for Signature { + fn random_for_test(rng: &mut T) -> Self { + let secret_key = SecretKey::random_for_test(rng); + let mut message = vec![0; 32]; + rng.fill_bytes(&mut message); + + Signature::new(&message, &secret_key) + } +} diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs new file mode 100644 index 000000000..047817a86 --- /dev/null +++ b/eth2/types/src/validator.rs @@ -0,0 +1,203 @@ +use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +const STATUS_FLAG_INITIATED_EXIT: u8 = 1; +const STATUS_FLAG_WITHDRAWABLE: u8 = 2; + +#[derive(Debug, PartialEq, Clone, Copy, Serialize)] +pub enum StatusFlags { + InitiatedExit, + Withdrawable, +} + +struct StatusFlagsDecodeError; + +impl From for DecodeError { + fn from(_: StatusFlagsDecodeError) -> DecodeError { + DecodeError::Invalid + } +} + +/// Handles the serialization logic for the `status_flags` field of the `Validator`. +fn status_flag_to_byte(flag: Option) -> u8 { + if let Some(flag) = flag { + match flag { + StatusFlags::InitiatedExit => STATUS_FLAG_INITIATED_EXIT, + StatusFlags::Withdrawable => STATUS_FLAG_WITHDRAWABLE, + } + } else { + 0 + } +} + +/// Handles the deserialization logic for the `status_flags` field of the `Validator`. +fn status_flag_from_byte(flag: u8) -> Result, StatusFlagsDecodeError> { + match flag { + 0 => Ok(None), + 1 => Ok(Some(StatusFlags::InitiatedExit)), + 2 => Ok(Some(StatusFlags::Withdrawable)), + _ => Err(StatusFlagsDecodeError), + } +} + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct Validator { + pub pubkey: PublicKey, + pub withdrawal_credentials: Hash256, + pub activation_epoch: Epoch, + pub exit_epoch: Epoch, + pub withdrawal_epoch: Epoch, + pub penalized_epoch: Epoch, + pub status_flags: Option, +} + +impl Validator { + /// This predicate indicates if the validator represented by this record is considered "active" at `slot`. + pub fn is_active_at(&self, slot: Epoch) -> bool { + self.activation_epoch <= slot && slot < self.exit_epoch + } +} + +impl Default for Validator { + /// Yields a "default" `Validator`. Primarily used for testing. 
+ fn default() -> Self { + Self { + pubkey: PublicKey::default(), + withdrawal_credentials: Hash256::default(), + activation_epoch: Epoch::from(std::u64::MAX), + exit_epoch: Epoch::from(std::u64::MAX), + withdrawal_epoch: Epoch::from(std::u64::MAX), + penalized_epoch: Epoch::from(std::u64::MAX), + status_flags: None, + } + } +} + +impl TestRandom for StatusFlags { + fn random_for_test(rng: &mut T) -> Self { + let options = vec![StatusFlags::InitiatedExit, StatusFlags::Withdrawable]; + options[(rng.next_u32() as usize) % options.len()] + } +} + +impl Encodable for Validator { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.pubkey); + s.append(&self.withdrawal_credentials); + s.append(&self.activation_epoch); + s.append(&self.exit_epoch); + s.append(&self.withdrawal_epoch); + s.append(&self.penalized_epoch); + s.append(&status_flag_to_byte(self.status_flags)); + } +} + +impl Decodable for Validator { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (pubkey, i) = <_>::ssz_decode(bytes, i)?; + let (withdrawal_credentials, i) = <_>::ssz_decode(bytes, i)?; + let (activation_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (exit_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (withdrawal_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (penalized_epoch, i) = <_>::ssz_decode(bytes, i)?; + let (status_flags_byte, i): (u8, usize) = <_>::ssz_decode(bytes, i)?; + + let status_flags = status_flag_from_byte(status_flags_byte)?; + + Ok(( + Self { + pubkey, + withdrawal_credentials, + activation_epoch, + exit_epoch, + withdrawal_epoch, + penalized_epoch, + status_flags, + }, + i, + )) + } +} + +impl TreeHash for Validator { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.pubkey.hash_tree_root()); + result.append(&mut self.withdrawal_credentials.hash_tree_root()); + result.append(&mut self.activation_epoch.hash_tree_root()); + result.append(&mut self.exit_epoch.hash_tree_root()); + result.append(&mut self.withdrawal_epoch.hash_tree_root()); + result.append(&mut self.penalized_epoch.hash_tree_root()); + result.append(&mut u64::from(status_flag_to_byte(self.status_flags)).hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for Validator { + fn random_for_test(rng: &mut T) -> Self { + Self { + pubkey: <_>::random_for_test(rng), + withdrawal_credentials: <_>::random_for_test(rng), + activation_epoch: <_>::random_for_test(rng), + exit_epoch: <_>::random_for_test(rng), + withdrawal_epoch: <_>::random_for_test(rng), + penalized_epoch: <_>::random_for_test(rng), + status_flags: Some(<_>::random_for_test(rng)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Validator::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + fn test_validator_can_be_active() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let mut validator = Validator::random_for_test(&mut rng); + + let activation_epoch = u64::random_for_test(&mut rng); + let exit_epoch = activation_epoch + 234; + + validator.activation_epoch = Epoch::from(activation_epoch); + validator.exit_epoch = Epoch::from(exit_epoch); + + for slot in (activation_epoch - 100)..(exit_epoch + 100) { + let slot = Epoch::from(slot); + if slot < 
activation_epoch { + assert!(!validator.is_active_at(slot)); + } else if slot >= exit_epoch { + assert!(!validator.is_active_at(slot)); + } else { + assert!(validator.is_active_at(slot)); + } + } + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = Validator::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/types/src/validator_registry.rs b/eth2/types/src/validator_registry.rs new file mode 100644 index 000000000..20863dd72 --- /dev/null +++ b/eth2/types/src/validator_registry.rs @@ -0,0 +1,172 @@ +/// Contains logic to manipulate a `&[Validator]`. +/// For now, we avoid defining a newtype and just have flat functions here. +use super::validator::*; +use crate::Epoch; + +/// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`. +pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { + validators + .iter() + .enumerate() + .filter_map(|(index, validator)| { + if validator.is_active_at(epoch) { + Some(index) + } else { + None + } + }) + .collect::>() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + + #[test] + fn can_get_empty_active_validator_indices() { + let mut rng = XorShiftRng::from_seed([42; 16]); + + let validators = vec![]; + let some_epoch = Epoch::random_for_test(&mut rng); + let indices = get_active_validator_indices(&validators, some_epoch); + assert_eq!(indices, vec![]); + } + + #[test] + fn can_get_no_active_validator_indices() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let mut validators = vec![]; + let count_validators = 10; + for _ in 0..count_validators { + validators.push(Validator::default()) + } + + let some_epoch = Epoch::random_for_test(&mut rng); + let indices = get_active_validator_indices(&validators, some_epoch); + assert_eq!(indices, vec![]); + } + + #[test] + fn can_get_all_active_validator_indices() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let count_validators = 10; + let some_epoch = Epoch::random_for_test(&mut rng); + + let mut validators = (0..count_validators) + .into_iter() + .map(|_| { + let mut validator = Validator::default(); + + let activation_offset = u64::random_for_test(&mut rng); + let exit_offset = u64::random_for_test(&mut rng); + + validator.activation_epoch = some_epoch - activation_offset; + validator.exit_epoch = some_epoch + exit_offset; + + validator + }) + .collect::>(); + + // test boundary condition by ensuring that at least one validator in the list just activated + if let Some(validator) = validators.get_mut(0) { + validator.activation_epoch = some_epoch; + } + + let indices = get_active_validator_indices(&validators, some_epoch); + assert_eq!( + indices, + (0..count_validators).into_iter().collect::>() + ); + } + + fn set_validators_to_default_entry_exit(validators: &mut [Validator]) { + for validator in validators.iter_mut() { + validator.activation_epoch = Epoch::max_value(); + validator.exit_epoch = Epoch::max_value(); + } + } + + // sets all `validators` to be active as of some epoch prior to `epoch`. returns the activation epoch. 
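+    // Usage sketch (illustrative; relies only on items defined in this file): indices
+    // are returned in ascending order, one per validator active at the queried epoch.
+    #[test]
+    fn get_active_validator_indices_usage_sketch() {
+        let mut active = Validator::default();
+        active.activation_epoch = Epoch::new(5);
+        active.exit_epoch = Epoch::new(15);
+        // `Validator::default()` is never active (its epochs are `u64::MAX`).
+        let validators = vec![Validator::default(), active, Validator::default()];
+        assert_eq!(
+            get_active_validator_indices(&validators, Epoch::new(10)),
+            vec![1]
+        );
+    }
+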
+ fn set_validators_to_activated(validators: &mut [Validator], epoch: Epoch) -> Epoch { + let activation_epoch = epoch - 10; + for validator in validators.iter_mut() { + validator.activation_epoch = activation_epoch; + } + activation_epoch + } + + // sets all `validators` to be exited as of some epoch before `epoch`. + fn set_validators_to_exited( + validators: &mut [Validator], + epoch: Epoch, + activation_epoch: Epoch, + ) { + assert!(activation_epoch < epoch); + let mut exit_epoch = activation_epoch + 10; + while exit_epoch >= epoch { + exit_epoch -= 1; + } + assert!(activation_epoch < exit_epoch && exit_epoch < epoch); + + for validator in validators.iter_mut() { + validator.exit_epoch = exit_epoch; + } + } + + #[test] + fn can_get_some_active_validator_indices() { + let mut rng = XorShiftRng::from_seed([42; 16]); + const COUNT_PARTITIONS: usize = 3; + const COUNT_VALIDATORS: usize = 3 * COUNT_PARTITIONS; + let some_epoch: Epoch = Epoch::random_for_test(&mut rng); + + let mut validators = (0..COUNT_VALIDATORS) + .into_iter() + .map(|_| { + let mut validator = Validator::default(); + + let activation_offset = Epoch::random_for_test(&mut rng); + let exit_offset = Epoch::random_for_test(&mut rng); + + validator.activation_epoch = some_epoch - activation_offset; + validator.exit_epoch = some_epoch + exit_offset; + + validator + }) + .collect::>(); + + // we partition the set into partitions based on lifecycle: + for (i, chunk) in validators.chunks_exact_mut(COUNT_PARTITIONS).enumerate() { + match i { + 0 => { + // 1. not activated (Default::default()) + set_validators_to_default_entry_exit(chunk); + } + 1 => { + // 2. activated, but not exited + set_validators_to_activated(chunk, some_epoch); + // test boundary condition by ensuring that at least one validator in the list just activated + if let Some(validator) = chunk.get_mut(0) { + validator.activation_epoch = some_epoch; + } + } + 2 => { + // 3. exited + let activation_epoch = set_validators_to_activated(chunk, some_epoch); + set_validators_to_exited(chunk, some_epoch, activation_epoch); + // test boundary condition by ensuring that at least one validator in the list just exited + if let Some(validator) = chunk.get_mut(0) { + validator.exit_epoch = some_epoch; + } + } + _ => unreachable!( + "constants local to this test not in sync with generation of test case" + ), + } + } + + let indices = get_active_validator_indices(&validators, some_epoch); + assert_eq!(indices, vec![3, 4, 5]); + } +} diff --git a/eth2/types/src/validator_registry_delta_block.rs b/eth2/types/src/validator_registry_delta_block.rs new file mode 100644 index 000000000..3142e0263 --- /dev/null +++ b/eth2/types/src/validator_registry_delta_block.rs @@ -0,0 +1,113 @@ +use crate::{test_utils::TestRandom, Hash256, Slot}; +use bls::PublicKey; +use rand::RngCore; +use serde_derive::Serialize; +use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +// The information gathered from the PoW chain validator registration function. +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct ValidatorRegistryDeltaBlock { + pub latest_registry_delta_root: Hash256, + pub validator_index: u32, + pub pubkey: PublicKey, + pub slot: Slot, + pub flag: u64, +} + +impl Default for ValidatorRegistryDeltaBlock { + /// Yields a "default" `Validator`. Primarily used for testing. 
+ fn default() -> Self { + Self { + latest_registry_delta_root: Hash256::zero(), + validator_index: std::u32::MAX, + pubkey: PublicKey::default(), + slot: Slot::from(std::u64::MAX), + flag: std::u64::MAX, + } + } +} + +impl Encodable for ValidatorRegistryDeltaBlock { + fn ssz_append(&self, s: &mut SszStream) { + s.append(&self.latest_registry_delta_root); + s.append(&self.validator_index); + s.append(&self.pubkey); + s.append(&self.slot); + s.append(&self.flag); + } +} + +impl Decodable for ValidatorRegistryDeltaBlock { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (latest_registry_delta_root, i) = <_>::ssz_decode(bytes, i)?; + let (validator_index, i) = <_>::ssz_decode(bytes, i)?; + let (pubkey, i) = <_>::ssz_decode(bytes, i)?; + let (slot, i) = <_>::ssz_decode(bytes, i)?; + let (flag, i) = <_>::ssz_decode(bytes, i)?; + + Ok(( + Self { + latest_registry_delta_root, + validator_index, + pubkey, + slot, + flag, + }, + i, + )) + } +} + +impl TreeHash for ValidatorRegistryDeltaBlock { + fn hash_tree_root(&self) -> Vec { + let mut result: Vec = vec![]; + result.append(&mut self.latest_registry_delta_root.hash_tree_root()); + result.append(&mut self.validator_index.hash_tree_root()); + result.append(&mut self.pubkey.hash_tree_root()); + result.append(&mut self.slot.hash_tree_root()); + result.append(&mut self.flag.hash_tree_root()); + hash(&result) + } +} + +impl TestRandom for ValidatorRegistryDeltaBlock { + fn random_for_test(rng: &mut T) -> Self { + Self { + latest_registry_delta_root: <_>::random_for_test(rng), + validator_index: <_>::random_for_test(rng), + pubkey: <_>::random_for_test(rng), + slot: <_>::random_for_test(rng), + flag: <_>::random_for_test(rng), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng); + + let bytes = ssz_encode(&original); + let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_hash_tree_root() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let original = ValidatorRegistryDeltaBlock::random_for_test(&mut rng); + + let result = original.hash_tree_root(); + + assert_eq!(result.len(), 32); + // TODO: Add further tests + // https://github.com/sigp/lighthouse/issues/170 + } +} diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml new file mode 100644 index 000000000..465510c59 --- /dev/null +++ b/eth2/utils/bls/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "bls" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "v0.3.0" } +hashing = { path = "../hashing" } +hex = "0.3" +serde = "1.0" +ssz = { path = "../ssz" } diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs new file mode 100644 index 000000000..6fed183f0 --- /dev/null +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -0,0 +1,83 @@ +use super::{AggregatePublicKey, Signature}; +use bls_aggregates::AggregateSignature as RawAggregateSignature; +use serde::ser::{Serialize, Serializer}; +use ssz::{ + decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, +}; + +/// A BLS aggregate signature. 
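+///
+/// An illustrative aggregation sketch, assuming two keypairs `kp1` and `kp2`
+/// signing the same message:
+///
+/// ```ignore
+/// let mut agg = AggregateSignature::new();
+/// agg.add(&Signature::new(b"message", &kp1.sk));
+/// agg.add(&Signature::new(b"message", &kp2.sk));
+/// ```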
+/// +/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ +/// serialization). +#[derive(Debug, PartialEq, Clone, Default, Eq)] +pub struct AggregateSignature(RawAggregateSignature); + +impl AggregateSignature { + /// Instantiate a new AggregateSignature. + pub fn new() -> Self { + AggregateSignature(RawAggregateSignature::new()) + } + + /// Add (aggregate) a signature to the `AggregateSignature`. + pub fn add(&mut self, signature: &Signature) { + self.0.add(signature.as_raw()) + } + + /// Verify the `AggregateSignature` against an `AggregatePublicKey`. + /// + /// Only returns `true` if the set of keys in the `AggregatePublicKey` match the set of keys + /// that signed the `AggregateSignature`. + pub fn verify(&self, msg: &[u8], aggregate_public_key: &AggregatePublicKey) -> bool { + self.0.verify(msg, aggregate_public_key) + } +} + +impl Encodable for AggregateSignature { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.0.as_bytes()); + } +} + +impl Decodable for AggregateSignature { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (sig_bytes, i) = decode_ssz_list(bytes, i)?; + let raw_sig = + RawAggregateSignature::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + Ok((AggregateSignature(raw_sig), i)) + } +} + +impl Serialize for AggregateSignature { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + +impl TreeHash for AggregateSignature { + fn hash_tree_root(&self) -> Vec { + hash(&self.0.as_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::super::{Keypair, Signature}; + use super::*; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let keypair = Keypair::random(); + + let mut original = AggregateSignature::new(); + original.add(&Signature::new(&[42, 42], &keypair.sk)); + + let bytes = ssz_encode(&original); + let (decoded, _) = AggregateSignature::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } +} diff --git a/eth2/utils/bls/src/keypair.rs b/eth2/utils/bls/src/keypair.rs new file mode 100644 index 000000000..1cce9c10e --- /dev/null +++ b/eth2/utils/bls/src/keypair.rs @@ -0,0 +1,16 @@ +use super::{PublicKey, SecretKey}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Keypair { + pub sk: SecretKey, + pub pk: PublicKey, +} + +impl Keypair { + /// Instantiate a Keypair using SecretKey::random(). 
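+    ///
+    /// ```ignore
+    /// let keypair = Keypair::random();
+    /// // The public key is derived from the newly-generated secret key.
+    /// assert_eq!(keypair.pk, PublicKey::from_secret_key(&keypair.sk));
+    /// ```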
+ pub fn random() -> Self { + let sk = SecretKey::random(); + let pk = PublicKey::from_secret_key(&sk); + Keypair { sk, pk } + } +} diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs new file mode 100644 index 000000000..646047d18 --- /dev/null +++ b/eth2/utils/bls/src/lib.rs @@ -0,0 +1,52 @@ +extern crate bls_aggregates; +extern crate hashing; +extern crate ssz; + +mod aggregate_signature; +mod keypair; +mod public_key; +mod secret_key; +mod signature; + +pub use crate::aggregate_signature::AggregateSignature; +pub use crate::keypair::Keypair; +pub use crate::public_key::PublicKey; +pub use crate::secret_key::SecretKey; +pub use crate::signature::Signature; + +pub use self::bls_aggregates::AggregatePublicKey; + +pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97; + +use hashing::hash; +use ssz::ssz_encode; +use std::default::Default; + +fn extend_if_needed(hash: &mut Vec) { + // NOTE: bls_aggregates crate demands 48 bytes, this may be removed as we get closer to production + hash.resize(48, Default::default()) +} + +/// For some signature and public key, ensure that the signature message was the public key and it +/// was signed by the secret key that corresponds to that public key. +pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { + let mut hash = hash(&ssz_encode(pubkey)); + extend_if_needed(&mut hash); + sig.verify_hashed(&hash, &pubkey) +} + +pub fn create_proof_of_possession(keypair: &Keypair) -> Signature { + let mut hash = hash(&ssz_encode(&keypair.pk)); + extend_if_needed(&mut hash); + Signature::new_hashed(&hash, &keypair.sk) +} + +pub fn bls_verify_aggregate( + pubkey: &AggregatePublicKey, + message: &[u8], + signature: &AggregateSignature, + _domain: u64, +) -> bool { + // TODO: add domain + signature.verify(message, pubkey) +} diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs new file mode 100644 index 000000000..0c2ad81bb --- /dev/null +++ b/eth2/utils/bls/src/public_key.rs @@ -0,0 +1,101 @@ +use super::SecretKey; +use bls_aggregates::PublicKey as RawPublicKey; +use hex::encode as hex_encode; +use serde::ser::{Serialize, Serializer}; +use ssz::{ + decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, +}; +use std::default; +use std::hash::{Hash, Hasher}; + +/// A single BLS signature. +/// +/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ +/// serialization). +#[derive(Debug, Clone, Eq)] +pub struct PublicKey(RawPublicKey); + +impl PublicKey { + pub fn from_secret_key(secret_key: &SecretKey) -> Self { + PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) + } + + /// Returns the underlying signature. + pub fn as_raw(&self) -> &RawPublicKey { + &self.0 + } + + /// Returns the last 6 bytes of the SSZ encoding of the public key, as a hex string. + /// + /// Useful for providing a short identifier to the user. 
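+    ///
+    /// The result is always twelve hex characters, e.g. a string of the form
+    /// `"3cf4a1b2c3d4"` (illustrative value).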
+ pub fn concatenated_hex_id(&self) -> String { + let bytes = ssz_encode(self); + let end_bytes = &bytes[bytes.len().saturating_sub(6)..bytes.len()]; + hex_encode(end_bytes) + } +} + +impl default::Default for PublicKey { + fn default() -> Self { + let secret_key = SecretKey::random(); + PublicKey::from_secret_key(&secret_key) + } +} + +impl Encodable for PublicKey { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.0.as_bytes()); + } +} + +impl Decodable for PublicKey { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (sig_bytes, i) = decode_ssz_list(bytes, i)?; + let raw_sig = RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + Ok((PublicKey(raw_sig), i)) + } +} + +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + +impl TreeHash for PublicKey { + fn hash_tree_root(&self) -> Vec { + hash(&self.0.as_bytes()) + } +} + +impl PartialEq for PublicKey { + fn eq(&self, other: &PublicKey) -> bool { + ssz_encode(self) == ssz_encode(other) + } +} + +impl Hash for PublicKey { + fn hash(&self, state: &mut H) { + ssz_encode(self).hash(state) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let sk = SecretKey::random(); + let original = PublicKey::from_secret_key(&sk); + + let bytes = ssz_encode(&original); + let (decoded, _) = PublicKey::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } +} diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs new file mode 100644 index 000000000..4ff9f8684 --- /dev/null +++ b/eth2/utils/bls/src/secret_key.rs @@ -0,0 +1,65 @@ +use bls_aggregates::{DecodeError as BlsDecodeError, SecretKey as RawSecretKey}; +use ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream, TreeHash}; + +/// A single BLS signature. +/// +/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ +/// serialization). +#[derive(Debug, PartialEq, Clone, Eq)] +pub struct SecretKey(RawSecretKey); + +impl SecretKey { + pub fn random() -> Self { + SecretKey(RawSecretKey::random()) + } + + /// Instantiate a SecretKey from existing bytes. + /// + /// Note: this is _not_ SSZ decoding. + pub fn from_bytes(bytes: &[u8]) -> Result { + Ok(SecretKey(RawSecretKey::from_bytes(bytes)?)) + } + + /// Returns the underlying secret key. 
+ pub fn as_raw(&self) -> &RawSecretKey { + &self.0 + } +} + +impl Encodable for SecretKey { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.0.as_bytes()); + } +} + +impl Decodable for SecretKey { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (sig_bytes, i) = decode_ssz_list(bytes, i)?; + let raw_sig = RawSecretKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + Ok((SecretKey(raw_sig), i)) + } +} + +impl TreeHash for SecretKey { + fn hash_tree_root(&self) -> Vec { + self.0.as_bytes().clone() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let original = + SecretKey::from_bytes("jzjxxgjajfjrmgodszzsgqccmhnyvetcuxobhtynojtpdtbj".as_bytes()) + .unwrap(); + + let bytes = ssz_encode(&original); + let (decoded, _) = SecretKey::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } +} diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs new file mode 100644 index 000000000..396e4eab7 --- /dev/null +++ b/eth2/utils/bls/src/signature.rs @@ -0,0 +1,107 @@ +use super::{PublicKey, SecretKey}; +use bls_aggregates::Signature as RawSignature; +use serde::ser::{Serialize, Serializer}; +use ssz::{ + decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, +}; + +/// A single BLS signature. +/// +/// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ +/// serialization). +#[derive(Debug, PartialEq, Clone, Eq)] +pub struct Signature(RawSignature); + +impl Signature { + /// Instantiate a new Signature from a message and a SecretKey. + pub fn new(msg: &[u8], sk: &SecretKey) -> Self { + Signature(RawSignature::new(msg, sk.as_raw())) + } + + /// Instantiate a new Signature from a message and a SecretKey, where the message has already + /// been hashed. + pub fn new_hashed(msg_hashed: &[u8], sk: &SecretKey) -> Self { + Signature(RawSignature::new_hashed(msg_hashed, sk.as_raw())) + } + + /// Verify the Signature against a PublicKey. + pub fn verify(&self, msg: &[u8], pk: &PublicKey) -> bool { + self.0.verify(msg, pk.as_raw()) + } + + /// Verify the Signature against a PublicKey, where the message has already been hashed. + pub fn verify_hashed(&self, msg_hash: &[u8], pk: &PublicKey) -> bool { + self.0.verify_hashed(msg_hash, pk.as_raw()) + } + + /// Returns the underlying signature. + pub fn as_raw(&self) -> &RawSignature { + &self.0 + } + + /// Returns a new empty signature. 
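+    ///
+    /// The "empty" signature is `BLS_AGG_SIG_BYTE_SIZE` (97) zero bytes, as
+    /// exercised by `test_empty_signature` below.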
+ pub fn empty_signature() -> Self { + let empty: Vec = vec![0; 97]; + Signature(RawSignature::from_bytes(&empty).unwrap()) + } +} + +impl Encodable for Signature { + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self.0.as_bytes()); + } +} + +impl Decodable for Signature { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { + let (sig_bytes, i) = decode_ssz_list(bytes, i)?; + let raw_sig = RawSignature::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + Ok((Signature(raw_sig), i)) + } +} + +impl TreeHash for Signature { + fn hash_tree_root(&self) -> Vec { + hash(&self.0.as_bytes()) + } +} + +impl Serialize for Signature { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz_encode(self)) + } +} + +#[cfg(test)] +mod tests { + use super::super::Keypair; + use super::*; + use ssz::ssz_encode; + + #[test] + pub fn test_ssz_round_trip() { + let keypair = Keypair::random(); + + let original = Signature::new(&[42, 42], &keypair.sk); + + let bytes = ssz_encode(&original); + let (decoded, _) = Signature::ssz_decode(&bytes, 0).unwrap(); + + assert_eq!(original, decoded); + } + + #[test] + pub fn test_empty_signature() { + let sig = Signature::empty_signature(); + + let sig_as_bytes: Vec = sig.as_raw().as_bytes(); + + assert_eq!(sig_as_bytes.len(), 97); + for one_byte in sig_as_bytes.iter() { + assert_eq!(*one_byte, 0); + } + } +} diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml new file mode 100644 index 000000000..d94b9f7b1 --- /dev/null +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "boolean-bitfield" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +ssz = { path = "../ssz" } +bit-vec = "0.5.0" +serde = "1.0" +serde_derive = "1.0" diff --git a/eth2/utils/boolean-bitfield/README.md b/eth2/utils/boolean-bitfield/README.md new file mode 100644 index 000000000..adf83f6f8 --- /dev/null +++ b/eth2/utils/boolean-bitfield/README.md @@ -0,0 +1,3 @@ +# Boolean Bitfield + +Implements a set of boolean as a tightly-packed vector of bits. diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs new file mode 100644 index 000000000..a0fce1f0a --- /dev/null +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -0,0 +1,413 @@ +extern crate bit_vec; +extern crate ssz; + +use bit_vec::BitVec; + +use serde::ser::{Serialize, Serializer}; +use std::cmp; +use std::default; + +/// A BooleanBitfield represents a set of booleans compactly stored as a vector of bits. +/// The BooleanBitfield is given a fixed size during construction. Reads outside of the current size return an out-of-bounds error. Writes outside of the current size expand the size of the set. +#[derive(Debug, Clone)] +pub struct BooleanBitfield(BitVec); + +/// Error represents some reason a request against a bitfield was not satisfied +#[derive(Debug, PartialEq)] +pub enum Error { + /// OutOfBounds refers to indexing into a bitfield where no bits exist; returns the illegal index and the current size of the bitfield, respectively + OutOfBounds(usize, usize), +} + +impl BooleanBitfield { + /// Create a new bitfield. + pub fn new() -> Self { + Default::default() + } + + pub fn with_capacity(initial_len: usize) -> Self { + Self::from_elem(initial_len, false) + } + + /// Create a new bitfield with the given length `initial_len` and all values set to `bit`. 
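+    ///
+    /// ```ignore
+    /// let field = BooleanBitfield::from_elem(8, false);
+    /// assert_eq!(field.len(), 8);
+    /// assert_eq!(field.num_set_bits(), 0);
+    /// ```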
+    pub fn from_elem(initial_len: usize, bit: bool) -> Self {
+        Self {
+            0: BitVec::from_elem(initial_len, bit),
+        }
+    }
+
+    /// Create a new bitfield using the supplied `bytes` as input
+    pub fn from_bytes(bytes: &[u8]) -> Self {
+        Self {
+            0: BitVec::from_bytes(bytes),
+        }
+    }
+
+    /// Read the value of a bit.
+    ///
+    /// If the index is in bounds, then result is Ok(value) where value is `true` if the bit is 1 and `false` if the bit is 0.
+    /// If the index is out of bounds, we return an error to that extent.
+    pub fn get(&self, i: usize) -> Result<bool, Error> {
+        match self.0.get(i) {
+            Some(value) => Ok(value),
+            None => Err(Error::OutOfBounds(i, self.0.len())),
+        }
+    }
+
+    /// Set the value of a bit.
+    ///
+    /// If the index is out of bounds, we expand the size of the underlying set to include the new index.
+    /// Returns the previous value if there was one.
+    pub fn set(&mut self, i: usize, value: bool) -> Option<bool> {
+        let previous = match self.get(i) {
+            Ok(previous) => Some(previous),
+            Err(Error::OutOfBounds(_, len)) => {
+                let new_len = i - len + 1;
+                self.0.grow(new_len, false);
+                None
+            }
+        };
+        self.0.set(i, value);
+        previous
+    }
+
+    /// Returns the index of the highest set bit. Some(n) if some bit is set, None otherwise.
+    pub fn highest_set_bit(&self) -> Option<usize> {
+        self.0.iter().rposition(|bit| bit)
+    }
+
+    /// Returns the number of bits in this bitfield.
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    /// Returns true if `self.len() == 0`
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns the number of bytes required to represent this bitfield.
+    pub fn num_bytes(&self) -> usize {
+        self.to_bytes().len()
+    }
+
+    /// Returns the number of `1` bits in the bitfield
+    pub fn num_set_bits(&self) -> usize {
+        self.0.iter().filter(|&bit| bit).count()
+    }
+
+    /// Returns a vector of bytes representing the bitfield
+    /// Note that this returns the bit layout of the underlying implementation in the `bit-vec` crate.
+    pub fn to_bytes(&self) -> Vec<u8> {
+        self.0.to_bytes()
+    }
+}
+
+impl default::Default for BooleanBitfield {
+    /// default provides the "empty" bitfield
+    /// Note: the empty bitfield is set to the `0` byte.
+    fn default() -> Self {
+        Self::from_elem(8, false)
+    }
+}
+
+impl cmp::PartialEq for BooleanBitfield {
+    /// Determines equality by comparing the `ssz` encoding of the two candidates.
+    /// This method ensures that the presence of high-order (empty) bits in the highest byte do not exclude equality when they are in fact representing the same information.
+    fn eq(&self, other: &Self) -> bool {
+        ssz::ssz_encode(self) == ssz::ssz_encode(other)
+    }
+}
+
+/// Create a new bitfield that is the union of two other bitfields.
+///
+/// For example `union(0101, 1000) == 1101`.
+///
+/// Note that although this is exposed via the `&` operator, the operation
+/// performed is a set union (bitwise OR), as the example shows.
+impl std::ops::BitAnd for BooleanBitfield {
+    type Output = Self;
+
+    fn bitand(self, other: Self) -> Self {
+        let (biggest, smallest) = if self.len() > other.len() {
+            (&self, &other)
+        } else {
+            (&other, &self)
+        };
+        let mut new = biggest.clone();
+        for i in 0..smallest.len() {
+            if let Ok(true) = smallest.get(i) {
+                new.set(i, true);
+            }
+        }
+        new
+    }
+}
+
+impl ssz::Encodable for BooleanBitfield {
+    // ssz_append encodes Self according to the `ssz` spec.
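+    // Concretely: the bitfield is appended as an ssz "list", i.e. a four-byte
+    // big-endian length prefix followed by the packed bytes from `to_bytes()`.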
+ fn ssz_append(&self, s: &mut ssz::SszStream) { + s.append_vec(&self.to_bytes()) + } +} + +impl ssz::Decodable for BooleanBitfield { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), ssz::DecodeError> { + let len = ssz::decode::decode_length(bytes, index, ssz::LENGTH_BYTES)?; + if (ssz::LENGTH_BYTES + len) > bytes.len() { + return Err(ssz::DecodeError::TooShort); + } + + if len == 0 { + Ok((BooleanBitfield::new(), index + ssz::LENGTH_BYTES)) + } else { + let bytes = &bytes[(index + 4)..(index + len + 4)]; + + let count = len * 8; + let mut field = BooleanBitfield::with_capacity(count); + for (byte_index, byte) in bytes.iter().enumerate() { + for i in 0..8 { + let bit = byte & (128 >> i); + if bit != 0 { + field.set(8 * byte_index + i, true); + } + } + } + + let index = index + ssz::LENGTH_BYTES + len; + Ok((field, index)) + } + } +} + +impl Serialize for BooleanBitfield { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&ssz::ssz_encode(self)) + } +} + +impl ssz::TreeHash for BooleanBitfield { + fn hash_tree_root(&self) -> Vec { + self.to_bytes().hash_tree_root() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::{ssz_encode, Decodable, SszStream}; + + #[test] + fn test_new_bitfield() { + let mut field = BooleanBitfield::new(); + let original_len = field.len(); + + for i in 0..100 { + if i < original_len { + assert!(!field.get(i).unwrap()); + } else { + assert!(field.get(i).is_err()); + } + let previous = field.set(i, true); + if i < original_len { + assert!(!previous.unwrap()); + } else { + assert!(previous.is_none()); + } + } + } + + #[test] + fn test_empty_bitfield() { + let mut field = BooleanBitfield::from_elem(0, false); + let original_len = field.len(); + + assert_eq!(original_len, 0); + + for i in 0..100 { + if i < original_len { + assert!(!field.get(i).unwrap()); + } else { + assert!(field.get(i).is_err()); + } + let previous = field.set(i, true); + if i < original_len { + assert!(!previous.unwrap()); + } else { + assert!(previous.is_none()); + } + } + + assert_eq!(field.len(), 100); + assert_eq!(field.num_set_bits(), 100); + } + + const INPUT: &[u8] = &[0b0000_0010, 0b0000_0010]; + + #[test] + fn test_get_from_bitfield() { + let field = BooleanBitfield::from_bytes(INPUT); + let unset = field.get(0).unwrap(); + assert!(!unset); + let set = field.get(6).unwrap(); + assert!(set); + let set = field.get(14).unwrap(); + assert!(set); + } + + #[test] + fn test_set_for_bitfield() { + let mut field = BooleanBitfield::from_bytes(INPUT); + let previous = field.set(10, true).unwrap(); + assert!(!previous); + let previous = field.get(10).unwrap(); + assert!(previous); + let previous = field.set(6, false).unwrap(); + assert!(previous); + let previous = field.get(6).unwrap(); + assert!(!previous); + } + + #[test] + fn test_highest_set_bit() { + let field = BooleanBitfield::from_bytes(INPUT); + assert_eq!(field.highest_set_bit().unwrap(), 14); + + let field = BooleanBitfield::from_bytes(&[0b0000_0011]); + assert_eq!(field.highest_set_bit().unwrap(), 7); + + let field = BooleanBitfield::new(); + assert_eq!(field.highest_set_bit(), None); + } + + #[test] + fn test_len() { + let field = BooleanBitfield::from_bytes(INPUT); + assert_eq!(field.len(), 16); + + let field = BooleanBitfield::new(); + assert_eq!(field.len(), 8); + } + + #[test] + fn test_num_set_bits() { + let field = BooleanBitfield::from_bytes(INPUT); + assert_eq!(field.num_set_bits(), 2); + + let field = BooleanBitfield::new(); + 
assert_eq!(field.num_set_bits(), 0); + } + + #[test] + fn test_to_bytes() { + let field = BooleanBitfield::from_bytes(INPUT); + assert_eq!(field.to_bytes(), INPUT); + + let field = BooleanBitfield::new(); + assert_eq!(field.to_bytes(), vec![0]); + } + + #[test] + fn test_out_of_bounds() { + let mut field = BooleanBitfield::from_bytes(INPUT); + + let out_of_bounds_index = field.len(); + assert!(field.set(out_of_bounds_index, true).is_none()); + assert!(field.len() == out_of_bounds_index + 1); + assert!(field.get(out_of_bounds_index).unwrap()); + + for i in 0..100 { + if i <= out_of_bounds_index { + assert!(field.set(i, true).is_some()); + } else { + assert!(field.set(i, true).is_none()); + } + } + } + + #[test] + fn test_grows_with_false() { + let input_all_set: &[u8] = &[0b1111_1111, 0b1111_1111]; + let mut field = BooleanBitfield::from_bytes(input_all_set); + + // Define `a` and `b`, where both are out of bounds and `b` is greater than `a`. + let a = field.len(); + let b = a + 1; + + // Ensure `a` is out-of-bounds for test integrity. + assert!(field.get(a).is_err()); + + // Set `b` to `true`. Also, for test integrity, ensure it was previously out-of-bounds. + assert!(field.set(b, true).is_none()); + + // Ensure that `a` wasn't also set to `true` during the grow. + assert_eq!(field.get(a), Ok(false)); + assert_eq!(field.get(b), Ok(true)); + } + + #[test] + fn test_num_bytes() { + let field = BooleanBitfield::from_bytes(INPUT); + assert_eq!(field.num_bytes(), 2); + + let field = BooleanBitfield::from_elem(2, true); + assert_eq!(field.num_bytes(), 1); + + let field = BooleanBitfield::from_elem(13, true); + assert_eq!(field.num_bytes(), 2); + } + + #[test] + fn test_ssz_encode() { + let field = create_test_bitfield(); + + let mut stream = SszStream::new(); + stream.append(&field); + assert_eq!(stream.drain(), vec![0, 0, 0, 2, 225, 192]); + + let field = BooleanBitfield::from_elem(18, true); + let mut stream = SszStream::new(); + stream.append(&field); + assert_eq!(stream.drain(), vec![0, 0, 0, 3, 255, 255, 192]); + } + + fn create_test_bitfield() -> BooleanBitfield { + let count = 2 * 8; + let mut field = BooleanBitfield::with_capacity(count); + + let indices = &[0, 1, 2, 7, 8, 9]; + for &i in indices { + field.set(i, true); + } + field + } + + #[test] + fn test_ssz_decode() { + let encoded = vec![0, 0, 0, 2, 225, 192]; + let (field, _): (BooleanBitfield, usize) = ssz::decode_ssz(&encoded, 0).unwrap(); + let expected = create_test_bitfield(); + assert_eq!(field, expected); + + let encoded = vec![0, 0, 0, 3, 255, 255, 3]; + let (field, _): (BooleanBitfield, usize) = ssz::decode_ssz(&encoded, 0).unwrap(); + let expected = BooleanBitfield::from_bytes(&[255, 255, 3]); + assert_eq!(field, expected); + } + + #[test] + fn test_ssz_round_trip() { + let original = BooleanBitfield::from_bytes(&vec![18; 12][..]); + let ssz = ssz_encode(&original); + let (decoded, _) = BooleanBitfield::ssz_decode(&ssz, 0).unwrap(); + assert_eq!(original, decoded); + } + + #[test] + fn test_bitand() { + let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); + let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); + let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); + assert_eq!(c, a & b); + } +} diff --git a/eth2/utils/hashing/Cargo.toml b/eth2/utils/hashing/Cargo.toml new file mode 100644 index 000000000..1527bceba --- /dev/null +++ b/eth2/utils/hashing/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "hashing" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +tiny-keccak = 
"1.4.2" diff --git a/eth2/utils/hashing/src/lib.rs b/eth2/utils/hashing/src/lib.rs new file mode 100644 index 000000000..b2bd5a279 --- /dev/null +++ b/eth2/utils/hashing/src/lib.rs @@ -0,0 +1,28 @@ +use tiny_keccak::Keccak; + +pub fn hash(input: &[u8]) -> Vec { + let mut keccak = Keccak::new_keccak256(); + keccak.update(input); + let mut result = vec![0; 32]; + keccak.finalize(result.as_mut_slice()); + result +} + +#[cfg(test)] +mod tests { + use super::*; + use std::convert::From; + + #[test] + fn test_hashing() { + let input: Vec = From::from("hello"); + + let output = hash(input.as_ref()); + let expected = &[ + 0x1c, 0x8a, 0xff, 0x95, 0x06, 0x85, 0xc2, 0xed, 0x4b, 0xc3, 0x17, 0x4f, 0x34, 0x72, + 0x28, 0x7b, 0x56, 0xd9, 0x51, 0x7b, 0x9c, 0x94, 0x81, 0x27, 0x31, 0x9a, 0x09, 0xa7, + 0xa3, 0x6d, 0xea, 0xc8, + ]; + assert_eq!(expected, output.as_slice()); + } +} diff --git a/eth2/utils/honey-badger-split/Cargo.toml b/eth2/utils/honey-badger-split/Cargo.toml new file mode 100644 index 000000000..87246eafd --- /dev/null +++ b/eth2/utils/honey-badger-split/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "honey-badger-split" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] diff --git a/eth2/utils/honey-badger-split/src/lib.rs b/eth2/utils/honey-badger-split/src/lib.rs new file mode 100644 index 000000000..b7097584f --- /dev/null +++ b/eth2/utils/honey-badger-split/src/lib.rs @@ -0,0 +1,85 @@ +/// A function for splitting a list into N pieces. +/// +/// We have titled it the "honey badger split" because of its robustness. It don't care. + +/// Iterator for the honey_badger_split function +pub struct Split<'a, T: 'a> { + n: usize, + current_pos: usize, + list: &'a [T], + list_length: usize, +} + +impl<'a, T> Iterator for Split<'a, T> { + type Item = &'a [T]; + + fn next(&mut self) -> Option { + self.current_pos += 1; + if self.current_pos <= self.n { + match self.list.get( + self.list_length * (self.current_pos - 1) / self.n + ..self.list_length * self.current_pos / self.n, + ) { + Some(v) => Some(v), + None => unreachable!(), + } + } else { + None + } + } +} + +/// Splits a slice into chunks of size n. All postive n values are applicable, +/// hence the honey_badger prefix. +/// +/// Returns an iterator over the original list. +pub trait SplitExt { + fn honey_badger_split(&self, n: usize) -> Split; +} + +impl SplitExt for [T] { + fn honey_badger_split(&self, n: usize) -> Split { + Split { + n, + current_pos: 0, + list: &self, + list_length: self.len(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_honey_badger_split() { + /* + * These test cases are generated from the eth2.0 spec `split()` + * function at commit cbd254a. 
+         */
+        let input: Vec<usize> = vec![0, 1, 2, 3];
+        let output: Vec<&[usize]> = input.honey_badger_split(2).collect();
+        assert_eq!(output, vec![&[0, 1], &[2, 3]]);
+
+        let input: Vec<usize> = vec![0, 1, 2, 3];
+        let output: Vec<&[usize]> = input.honey_badger_split(6).collect();
+        let expected: Vec<&[usize]> = vec![&[], &[0], &[1], &[], &[2], &[3]];
+        assert_eq!(output, expected);
+
+        let input: Vec<usize> = vec![0, 1, 2, 3];
+        let output: Vec<&[usize]> = input.honey_badger_split(10).collect();
+        let expected: Vec<&[usize]> = vec![&[], &[], &[0], &[], &[1], &[], &[], &[2], &[], &[3]];
+        assert_eq!(output, expected);
+
+        let input: Vec<usize> = vec![0];
+        let output: Vec<&[usize]> = input.honey_badger_split(5).collect();
+        let expected: Vec<&[usize]> = vec![&[], &[], &[], &[], &[0]];
+        assert_eq!(output, expected);
+
+        let input: Vec<usize> = vec![0, 1, 2];
+        let output: Vec<&[usize]> = input.honey_badger_split(2).collect();
+        let expected: Vec<&[usize]> = vec![&[0], &[1, 2]];
+        assert_eq!(output, expected);
+    }
+}
diff --git a/eth2/utils/slot_clock/Cargo.toml b/eth2/utils/slot_clock/Cargo.toml
new file mode 100644
index 000000000..31a435725
--- /dev/null
+++ b/eth2/utils/slot_clock/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "slot_clock"
+version = "0.1.0"
+authors = ["Paul Hauner "]
+edition = "2018"
+
+[dependencies]
+types = { path = "../../types" }
diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs
new file mode 100644
index 000000000..0379d50d9
--- /dev/null
+++ b/eth2/utils/slot_clock/src/lib.rs
@@ -0,0 +1,12 @@
+mod system_time_slot_clock;
+mod testing_slot_clock;
+
+pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock};
+pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock};
+pub use types::Slot;
+
+pub trait SlotClock: Send + Sync {
+    type Error;
+
+    fn present_slot(&self) -> Result<Option<Slot>, Self::Error>;
+}
diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs
new file mode 100644
index 000000000..99f051985
--- /dev/null
+++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs
@@ -0,0 +1,139 @@
+use super::SlotClock;
+use std::time::{Duration, SystemTime};
+use types::Slot;
+
+pub use std::time::SystemTimeError;
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    SlotDurationIsZero,
+    SystemTimeError(String),
+}
+
+/// Determines the present slot based upon the present system time.
+#[derive(Clone)]
+pub struct SystemTimeSlotClock {
+    genesis_seconds: u64,
+    slot_duration_seconds: u64,
+}
+
+impl SystemTimeSlotClock {
+    /// Create a new `SystemTimeSlotClock`.
+    ///
+    /// Returns an Error if `slot_duration_seconds == 0`.
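+    ///
+    /// ```ignore
+    /// // Illustrative values: genesis at unix time 1_544_000_000, 6-second slots.
+    /// let clock = SystemTimeSlotClock::new(1_544_000_000, 6).unwrap();
+    /// ```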
+    pub fn new(
+        genesis_seconds: u64,
+        slot_duration_seconds: u64,
+    ) -> Result<SystemTimeSlotClock, Error> {
+        if slot_duration_seconds == 0 {
+            Err(Error::SlotDurationIsZero)
+        } else {
+            Ok(Self {
+                genesis_seconds,
+                slot_duration_seconds,
+            })
+        }
+    }
+}
+
+impl SlotClock for SystemTimeSlotClock {
+    type Error = Error;
+
+    fn present_slot(&self) -> Result<Option<Slot>, Error> {
+        let syslot_time = SystemTime::now();
+        let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?;
+        let duration_since_genesis =
+            duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds));
+        match duration_since_genesis {
+            None => Ok(None),
+            Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d)),
+        }
+    }
+}
+
+impl From<SystemTimeError> for Error {
+    fn from(e: SystemTimeError) -> Error {
+        Error::SystemTimeError(format!("{:?}", e))
+    }
+}
+
+fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option<Slot> {
+    Some(Slot::new(
+        duration.as_secs().checked_div(slot_duration_seconds)?,
+    ))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /*
+     * Note: these tests are using actual system times and could fail if they are executed on a
+     * very slow machine.
+     */
+    #[test]
+    fn test_slot_now() {
+        let slot_time = 100;
+
+        let now = SystemTime::now();
+        let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap();
+
+        let genesis = since_epoch.as_secs() - slot_time * 89;
+
+        let clock = SystemTimeSlotClock {
+            genesis_seconds: genesis,
+            slot_duration_seconds: slot_time,
+        };
+        assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89)));
+
+        let clock = SystemTimeSlotClock {
+            genesis_seconds: since_epoch.as_secs(),
+            slot_duration_seconds: slot_time,
+        };
+        assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0)));
+
+        let clock = SystemTimeSlotClock {
+            genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5,
+            slot_duration_seconds: slot_time,
+        };
+        assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(42)));
+    }
+
+    #[test]
+    fn test_slot_from_duration() {
+        let slot_time = 100;
+
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(0)),
+            Some(Slot::new(0))
+        );
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(10)),
+            Some(Slot::new(0))
+        );
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(100)),
+            Some(Slot::new(1))
+        );
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(101)),
+            Some(Slot::new(1))
+        );
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(1000)),
+            Some(Slot::new(10))
+        );
+    }
+
+    #[test]
+    fn test_slot_from_duration_slot_time_zero() {
+        let slot_time = 0;
+
+        assert_eq!(slot_from_duration(slot_time, Duration::from_secs(0)), None);
+        assert_eq!(slot_from_duration(slot_time, Duration::from_secs(10)), None);
+        assert_eq!(
+            slot_from_duration(slot_time, Duration::from_secs(1000)),
+            None
+        );
+    }
+}
diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs
new file mode 100644
index 000000000..80ee40539
--- /dev/null
+++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs
@@ -0,0 +1,48 @@
+use super::SlotClock;
+use std::sync::RwLock;
+use types::Slot;
+
+#[derive(Debug, PartialEq)]
+pub enum Error {}
+
+/// A `SlotClock` that reports whichever slot it has been set to; used for testing.
+pub struct TestingSlotClock {
+    slot: RwLock<u64>,
+}
+
+impl TestingSlotClock {
+    /// Create a new `TestingSlotClock` that reports `slot` until told otherwise via `set_slot`.
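+    ///
+    /// A minimal usage sketch (mirrors `test_slot_now` below):
+    ///
+    /// ```ignore
+    /// let clock = TestingSlotClock::new(10);
+    /// assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10))));
+    /// ```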
+ pub fn new(slot: u64) -> TestingSlotClock { + TestingSlotClock { + slot: RwLock::new(slot), + } + } + + pub fn set_slot(&self, slot: u64) { + *self.slot.write().expect("TestingSlotClock poisoned.") = slot; + } +} + +impl SlotClock for TestingSlotClock { + type Error = Error; + + fn present_slot(&self) -> Result, Error> { + let slot = *self.slot.read().expect("TestingSlotClock poisoned."); + Ok(Some(Slot::new(slot))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_slot_now() { + let clock = TestingSlotClock::new(10); + assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); + clock.set_slot(123); + assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); + } +} diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml new file mode 100644 index 000000000..25326cb5b --- /dev/null +++ b/eth2/utils/ssz/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "ssz" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +bytes = "0.4.9" +ethereum-types = "0.4.0" +hashing = { path = "../hashing" } diff --git a/eth2/utils/ssz/README.md b/eth2/utils/ssz/README.md new file mode 100644 index 000000000..7355ca4cc --- /dev/null +++ b/eth2/utils/ssz/README.md @@ -0,0 +1,543 @@ +# simpleserialize (ssz) [WIP] + +This is currently a ***Work In Progress*** crate. + +SimpleSerialize is a serialization protocol described by Vitalik Buterin. The +method is tentatively intended for use in the Ethereum Beacon Chain as +described in the [Ethereum 2.1 Spec](https://notes.ethereum.org/s/Syj3QZSxm). +The Beacon Chain specification is the core, canonical specification which we +are following. + +The current reference implementation has been described in the [Beacon Chain +Repository](https://github.com/ethereum/beacon_chain/blob/master/ssz/ssz.py). 
+ +*Please Note: This implementation is presently a placeholder until the final +spec is decided.*\ +*Do not rely upon it for reference.* + + +## Table of Contents + +* [SimpleSerialize Overview](#simpleserialize-overview) + + [Serialize/Encode](#serializeencode) + - [int or uint: 8/16/24/32/64/256](#int-or-uint-816243264256) + - [Address](#address) + - [Hash32](#hash32) + - [Bytes](#bytes) + - [List](#list) + + [Deserialize/Decode](#deserializedecode) + - [Int or Uint: 8/16/24/32/64/256](#int-or-uint-816243264256) + - [Address](#address-1) + - [Hash32](#hash32-1) + - [Bytes](#bytes-1) + - [List](#list-1) +* [Technical Overview](#technical-overview) +* [Building](#building) + + [Installing Rust](#installing-rust) +* [Dependencies](#dependencies) + + [bytes v0.4.9](#bytes-v049) + + [ethereum-types](#ethereum-types) +* [Interface](#interface) + + [Encodable](#encodable) + + [Decodable](#decodable) + + [SszStream](#sszstream) + - [new()](#new) + - [append(&mut self, value: &E) -> &mut Self](#appendmut-self-value-e---mut-self) + - [append_encoded_val(&mut self, vec: &Vec)](#append_encoded_valmut-self-vec-vec) + - [append_vec(&mut self, vec: &Vec)](#append_vecmut-self-vec-vec) + - [drain(self) -> Vec](#drainself---vec) + + [decode_ssz(ssz_bytes: &(u8), index: usize) -> Result](#decode_sszssz_bytes-u8-index-usize---resultt-usize-decodeerror) + + [decode_ssz_list(ssz_bytes: &(u8), index: usize) -> Result, usize), DecodeError>](#decode_ssz_listssz_bytes-u8-index-usize---resultvec-usize-decodeerror) + + [decode_length(bytes: &(u8), index: usize, length_bytes: usize) -> Result](#decode_lengthbytes-u8-index-usize-length_bytes-usize---resultusize-decodeerror) +* [Usage](#usage) + + [Serializing/Encoding](#serializingencoding) + - [Rust](#rust) +* [Deserializing/Decoding](#deserializingdecoding) + - [Rust](#rust-1) + +--- + +## SimpleSerialize Overview + +The ``simpleserialize`` method for serialization follows simple byte conversion, +making it effective and efficient for encoding and decoding. + +The decoding requires knowledge of the data **type** and the order of the +serialization. + +Syntax: + +| Shorthand | Meaning | +|:-------------|:----------------------------------------------------| +| `big` | ``big endian`` | +| `to_bytes` | convert to bytes. Params: ``(size, byte order)`` | +| `from_bytes` | convert from bytes. Params: ``(bytes, byte order)`` | +| `value` | the value to serialize | +| `rawbytes` | raw encoded/serialized bytes | +| `len(value)` | get the length of the value. (number of bytes etc) | + +### Serialize/Encode + +#### int or uint: 8/16/24/32/64/256 + +Convert directly to bytes the size of the int. (e.g. ``int16 = 2 bytes``) + +All integers are serialized as **big endian**. + +| Check to perform | Code | +|:-----------------------|:------------------------| +| Int size is not 0 | ``int_size > 0`` | +| Size is a byte integer | ``int_size % 8 == 0`` | +| Value is less than max | ``2**int_size > value`` | + +```python +buffer_size = int_size / 8 +return value.to_bytes(buffer_size, 'big') +``` + +#### Address + +The address should already come as a hash/byte format. Ensure that length is +**20**. + +| Check to perform | Code | +|:-----------------------|:---------------------| +| Length is correct (20) | ``len(value) == 20`` | + +```python +assert( len(value) == 20 ) +return value +``` + +#### Hash32 + +The hash32 should already be a 32 byte length serialized data format. The safety +check ensures the 32 byte length is satisfied. 
+ +| Check to perform | Code | +|:-----------------------|:---------------------| +| Length is correct (32) | ``len(value) == 32`` | + +```python +assert( len(value) == 32 ) +return value +``` + +#### Bytes + +For general `byte` type: +1. Get the length/number of bytes; Encode into a 4 byte integer. +2. Append the value to the length and return: ``[ length_bytes ] + [ + value_bytes ]`` + +```python +byte_length = (len(value)).to_bytes(4, 'big') +return byte_length + value +``` + +#### List + +For lists of values, get the length of the list and then serialize the value +of each item in the list: +1. For each item in list: + 1. serialize. + 2. append to string. +2. Get size of serialized string. Encode into a 4 byte integer. + +```python +serialized_list_string = '' + +for item in value: + serialized_list_string += serialize(item) + +serialized_len = len(serialized_list_string) + +return serialized_len + serialized_list_string +``` + +### Deserialize/Decode + +The decoding requires knowledge of the type of the item to be decoded. When +performing decoding on an entire serialized string, it also requires knowledge +of what order the objects have been serialized in. + +Note: Each return will provide ``deserialized_object, new_index`` keeping track +of the new index. + +At each step, the following checks should be made: + +| Check Type | Check | +|:-------------------------|:----------------------------------------------------------| +| Ensure sufficient length | ``length(rawbytes) > current_index + deserialize_length`` | + +#### Int or Uint: 8/16/24/32/64/256 + +Convert directly from bytes into integer utilising the number of bytes the same +size as the integer length. (e.g. ``int16 == 2 bytes``) + +All integers are interpreted as **big endian**. + +```python +byte_length = int_size / 8 +new_index = current_index + int_size +return int.from_bytes(rawbytes[current_index:current_index+int_size], 'big'), new_index +``` + +#### Address + +Return the 20 bytes. + +```python +new_index = current_index + 20 +return rawbytes[current_index:current_index+20], new_index +``` + +#### Hash32 + +Return the 32 bytes. + +```python +new_index = current_index + 32 +return rawbytes[current_index:current_index+32], new_index +``` + +#### Bytes + +Get the length of the bytes, return the bytes. + +```python +bytes_length = int.from_bytes(rawbytes[current_index:current_index+4], 'big') +new_index = current_index + 4 + bytes_lenth +return rawbytes[current_index+4:current_index+4+bytes_length], new_index +``` + +#### List + +Deserailize each object in the list. +1. Get the length of the serialized list. +2. Loop through deseralizing each item in the list until you reach the +entire length of the list. + + +| Check type | code | +|:------------------------------------|:--------------------------------------| +| rawbytes has enough left for length | ``len(rawbytes) > current_index + 4`` | + +```python +total_length = int.from_bytes(rawbytes[current_index:current_index+4], 'big') +new_index = current_index + 4 + total_length +item_index = current_index + 4 +deserialized_list = [] + +while item_index < new_index: + object, item_index = deserialize(rawbytes, item_index, item_type) + deserialized_list.append(object) + +return deserialized_list, new_index +``` + +## Technical Overview + +The SimpleSerialize is a simple method for serializing objects for use in the +Ethereum beacon chain proposed by Vitalik Buterin. 
There are currently two +implementations denoting the functionality, the [Reference +Implementation](https://github.com/ethereum/beacon_chain/blob/master/ssz/ssz.py) +and the [Module](https://github.com/ethereum/research/tree/master/py_ssz) in +Ethereum research. It is being developed as a crate for the [**Rust programming +language**](https://www.rust-lang.org). + +The crate will provide the functionality to serialize several types in +accordance with the spec and provide a serialized stream of bytes. + +## Building + +ssz currently builds on **rust v1.27.1** + +### Installing Rust + +The [**Rustup**](https://rustup.rs/) tool provides functionality to easily +manage rust on your local instance. It is a recommended method for installing +rust. + +Installing on Linux or OSX: + +```bash +curl https://sh.rustup.rs -sSf | sh +``` + +Installing on Windows: + +* 32 Bit: [ https://win.rustup.rs/i686 ](https://win.rustup.rs/i686) +* 64 Bit: [ https://win.rustup.rs/x86_64 ](https://win.rustup.rs/x86_64) + +## Dependencies + +All dependencies are listed in the ``Cargo.toml`` file. + +To build and install all related dependencies: + +```bash +cargo build +``` + +### bytes v0.4.9 + +The `bytes` crate provides effective Byte Buffer implementations and +interfaces. + +Documentation: [ https://docs.rs/bytes/0.4.9/bytes/ ](https://docs.rs/bytes/0.4.9/bytes/) + +### ethereum-types + +The `ethereum-types` provide primitives for types that are commonly used in the +ethereum protocol. This crate is provided by [Parity](https://www.parity.io/). + +Github: [ https://github.com/paritytech/primitives ](https://github.com/paritytech/primitives) + + +--- + +## Interface + +### Encodable + +A type is **Encodable** if it has a valid ``ssz_append`` function. This is +used to ensure that the object/type can be serialized. + +```rust +pub trait Encodable { + fn ssz_append(&self, s: &mut SszStream); +} +``` + +### Decodable + +A type is **Decodable** if it has a valid ``ssz_decode`` function. This is +used to ensure the object is deserializable. + +```rust +pub trait Decodable: Sized { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError>; +} +``` + +### SszStream + +The main implementation is the `SszStream` struct. The struct contains a +buffer of bytes, a Vector of `uint8`. + +#### new() + +Create a new, empty instance of the SszStream. + +**Example** + +```rust +let mut ssz = SszStream::new() +``` + +#### append(&mut self, value: &E) -> &mut Self + +Appends a value that can be encoded into the stream. + +| Parameter | Description | +|:---------:|:-----------------------------------------| +| ``value`` | Encodable value to append to the stream. | + +**Example** + +```rust +ssz.append(&x) +``` + +#### append_encoded_val(&mut self, vec: &Vec) + +Appends some ssz encoded bytes to the stream. + +| Parameter | Description | +|:---------:|:----------------------------------| +| ``vec`` | A vector of serialized ssz bytes. | + +**Example** + +```rust +let mut a = [0, 1]; +ssz.append_encoded_val(&a.to_vec()); +``` + +#### append_vec(&mut self, vec: &Vec) + +Appends some vector (list) of encodable values to the stream. + +| Parameter | Description | +|:---------:|:----------------------------------------------| +| ``vec`` | Vector of Encodable objects to be serialized. | + +**Example** + +```rust +ssz.append_vec(attestations); +``` + +#### drain(self) -> Vec + +Consumes the ssz stream and returns the buffer of bytes. 
+ +**Example** + +```rust +ssz.drain() +``` + +### decode_ssz(ssz_bytes: &[u8], index: usize) -> Result<(T, usize), DecodeError> + +Decodes a single ssz serialized value of type `T`. Note: `T` must be decodable. + +| Parameter | Description | +|:-------------:|:------------------------------------| +| ``ssz_bytes`` | Serialized list of bytes. | +| ``index`` | Starting index to deserialize from. | + +**Returns** + +| Return Value | Description | +|:-------------------:|:----------------------------------------------| +| ``Tuple(T, usize)`` | Returns the tuple of the type and next index. | +| ``DecodeError`` | Error if the decoding could not be performed. | + +**Example** + +```rust +let res: Result<(u16, usize), DecodeError> = decode_ssz(&encoded_ssz, 0); +``` + +### decode_ssz_list(ssz_bytes: &[u8], index: usize) -> Result<(Vec, usize), DecodeError> + +Decodes a list of serialized values into a vector. + +| Parameter | Description | +|:-------------:|:------------------------------------| +| ``ssz_bytes`` | Serialized list of bytes. | +| ``index`` | Starting index to deserialize from. | + +**Returns** + +| Return Value | Description | +|:------------------------:|:----------------------------------------------| +| ``Tuple(Vec, usize)`` | Returns the tuple of the type and next index. | +| ``DecodeError`` | Error if the decoding could not be performed. | + +**Example** + +```rust +let decoded: Result<(Vec, usize), DecodeError> = decode_ssz_list( &encoded_ssz, 0); +``` + +### decode_length(bytes: &[u8], index: usize, length_bytes: usize) -> Result + +Deserializes the "length" value in the serialized bytes from the index. The +length of bytes is given (usually 4 stated in the reference implementation) and +is often the value appended to the list infront of the actual serialized +object. + +| Parameter | Description | +|:----------------:|:-------------------------------------------| +| ``bytes`` | Serialized list of bytes. | +| ``index`` | Starting index to deserialize from. | +| ``length_bytes`` | Number of bytes to deserialize into usize. | + + +**Returns** + +| Return Value | Description | +|:---------------:|:-----------------------------------------------------------| +| ``usize`` | The length of the serialized object following this length. | +| ``DecodeError`` | Error if the decoding could not be performed. | + +**Example** + +```rust +let length_of_serialized: Result = decode_length(&encoded, 0, 4); +``` + +--- + +## Usage + +### Serializing/Encoding + +#### Rust + +Create the `simpleserialize` stream that will produce the serialized objects. + +```rust +let mut ssz = SszStream::new(); +``` + +Encode the values that you need by using the ``append(..)`` method on the `SszStream`. + +The **append** function is how the value gets serialized. + +```rust +let x: u64 = 1 << 32; +ssz.append(&x); +``` + +To get the serialized byte vector use ``drain()`` on the `SszStream`. + +```rust +ssz.drain() +``` + +**Example** + +```rust +// 1 << 32 = 4294967296; +// As bytes it should equal: [0,0,0,1,0,0,0] +let x: u64 = 1 << 32; + +// Create the new ssz stream +let mut ssz = SszStream::new(); + +// Serialize x +ssz.append(&x); + +// Check that it is correct. +assert_eq!(ssz.drain(), vec![0,0,0,1,0,0,0]); +``` + +## Deserializing/Decoding + +#### Rust + +From the `simpleserialize` bytes, we are converting to the object. + +```rust +let ssz = vec![0, 0, 8, 255, 255, 255, 255, 255, 255, 255, 255]; + +// Returns the result and the next index to decode. 
+let (result, index): (u64, usize) = decode_ssz(&ssz, 3).unwrap(); + +// Check for correctness +// 2**64-1 = 18446744073709551615 +assert_eq!(result, 18446744073709551615); +// Index = 3 (initial index) + 8 (8 byte int) = 11 +assert_eq!(index, 11); +``` + +Decoding a list of items: + +```rust +// Encoded/Serialized list with junk numbers at the front +let serialized_list = vec![ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 32, 0, 0, 0, + 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, + 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15]; + +// Returns the result (Vector of usize) and the index of the next +let decoded: (Vec, usize) = decode_ssz_list(&serialized_list, 10).unwrap(); + +// Check for correctness +assert_eq!(decoded.0, vec![15,15,15,15]); + +assert_eq!(decoded.1, 46); +``` diff --git a/eth2/utils/ssz/src/decode.rs b/eth2/utils/ssz/src/decode.rs new file mode 100644 index 000000000..426baeace --- /dev/null +++ b/eth2/utils/ssz/src/decode.rs @@ -0,0 +1,193 @@ +use super::LENGTH_BYTES; + +#[derive(Debug, PartialEq)] +pub enum DecodeError { + TooShort, + TooLong, + Invalid, +} + +pub trait Decodable: Sized { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError>; +} + +/// Decode the given bytes for the given type +/// +/// The single ssz encoded value will be decoded as the given type at the +/// given index. +pub fn decode_ssz(ssz_bytes: &[u8], index: usize) -> Result<(T, usize), DecodeError> +where + T: Decodable, +{ + if index >= ssz_bytes.len() { + return Err(DecodeError::TooShort); + } + T::ssz_decode(ssz_bytes, index) +} + +/// Decode a vector (list) of encoded bytes. +/// +/// Each element in the list will be decoded and placed into the vector. +pub fn decode_ssz_list(ssz_bytes: &[u8], index: usize) -> Result<(Vec, usize), DecodeError> +where + T: Decodable, +{ + if index + LENGTH_BYTES > ssz_bytes.len() { + return Err(DecodeError::TooShort); + }; + + // get the length + let serialized_length = match decode_length(ssz_bytes, index, LENGTH_BYTES) { + Err(v) => return Err(v), + Ok(v) => v, + }; + + let final_len: usize = index + LENGTH_BYTES + serialized_length; + + if final_len > ssz_bytes.len() { + return Err(DecodeError::TooShort); + }; + + let mut tmp_index = index + LENGTH_BYTES; + let mut res_vec: Vec = Vec::new(); + + while tmp_index < final_len { + match T::ssz_decode(ssz_bytes, tmp_index) { + Err(v) => return Err(v), + Ok(v) => { + tmp_index = v.1; + res_vec.push(v.0); + } + }; + } + + Ok((res_vec, final_len)) +} + +/// Given some number of bytes, interpret the first four +/// bytes as a 32-bit big-endian integer and return the +/// result. 
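+/// (More precisely, it reads `length_bytes` bytes starting at `index`; callers
+/// in this crate always pass `LENGTH_BYTES`, i.e. four.)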
+pub fn decode_length(
+    bytes: &[u8],
+    index: usize,
+    length_bytes: usize,
+) -> Result<usize, DecodeError> {
+    if bytes.len() < index + length_bytes {
+        return Err(DecodeError::TooShort);
+    };
+    let mut len: usize = 0;
+    for (i, byte) in bytes
+        .iter()
+        .enumerate()
+        .take(index + length_bytes)
+        .skip(index)
+    {
+        let offset = (index + length_bytes - i - 1) * 8;
+        len |= (*byte as usize) << offset;
+    }
+    Ok(len)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::encode::encode_length;
+    use super::*;
+
+    #[test]
+    fn test_ssz_decode_length() {
+        let decoded = decode_length(&vec![0, 0, 0, 1], 0, LENGTH_BYTES);
+        assert_eq!(decoded.unwrap(), 1);
+
+        let decoded = decode_length(&vec![0, 0, 1, 0], 0, LENGTH_BYTES);
+        assert_eq!(decoded.unwrap(), 256);
+
+        let decoded = decode_length(&vec![0, 0, 1, 255], 0, LENGTH_BYTES);
+        assert_eq!(decoded.unwrap(), 511);
+
+        let decoded = decode_length(&vec![255, 255, 255, 255], 0, LENGTH_BYTES);
+        assert_eq!(decoded.unwrap(), 4294967295);
+    }
+
+    #[test]
+    fn test_encode_decode_length() {
+        // Round-trip values at and around each byte boundary, up to the largest
+        // length encodable in a four-byte prefix.
+        let params: Vec<usize> = vec![
+            0,
+            1,
+            2,
+            3,
+            7,
+            8,
+            16,
+            1 << 8,
+            (1 << 8) + 1,
+            1 << 16,
+            (1 << 16) + 1,
+            1 << 24,
+            (1 << 24) + 1,
+            4294967295, // 2^(4*8) - 1
+        ];
+        for i in params {
+            let decoded = decode_length(&encode_length(i, LENGTH_BYTES), 0, LENGTH_BYTES).unwrap();
+            assert_eq!(i, decoded);
+        }
+    }
+
+    #[test]
+    fn test_decode_ssz_list() {
+        // u16
+        let v: Vec<u16> = vec![10, 10, 10, 10];
+        let decoded: (Vec<u16>, usize) =
+            decode_ssz_list(&vec![0, 0, 0, 8, 0, 10, 0, 10, 0, 10, 0, 10], 0).unwrap();
+
+        assert_eq!(decoded.0, v);
+        assert_eq!(decoded.1, 12);
+
+        // u32
+        let v: Vec<u32> = vec![10, 10, 10, 10];
+        let decoded: (Vec<u32>, usize) = decode_ssz_list(
+            &vec![
+                0, 0, 0, 16, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10,
+            ],
+            0,
+        )
+        .unwrap();
+        assert_eq!(decoded.0, v);
+        assert_eq!(decoded.1, 20);
+
+        // u64
+        let v: Vec<u64> = vec![10, 10, 10, 10];
+        let decoded: (Vec<u64>, usize) = decode_ssz_list(
+            &vec![
+                0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0,
+                10, 0, 0, 0, 0, 0, 0, 0, 10,
+            ],
+            0,
+        )
+        .unwrap();
+        assert_eq!(decoded.0, v);
+        assert_eq!(decoded.1, 36);
+
+        // Check that it can accept index
+        let v: Vec<usize> = vec![15, 15, 15, 15];
+        let decoded: (Vec<usize>, usize) = decode_ssz_list(
+            &vec![
+                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0,
+                0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15,
+            ],
+            10,
+        )
+        .unwrap();
+        assert_eq!(decoded.0, v);
+        assert_eq!(decoded.1, 46);
+
+        // Check that length > bytes throws error
+        let decoded: Result<(Vec<usize>, usize), DecodeError> =
+            decode_ssz_list(&vec![0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15], 0);
+        assert_eq!(decoded, Err(DecodeError::TooShort));
+
+        // Check that incorrect index throws error
+        let decoded: Result<(Vec<usize>, usize), DecodeError> =
+            decode_ssz_list(&vec![0, 0, 0, 0, 0, 0, 0, 15], 16);
+        assert_eq!(decoded, Err(DecodeError::TooShort));
+    }
+}
diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs
new file mode 100644
index 000000000..dfb969a8d
--- /dev/null
+++ b/eth2/utils/ssz/src/encode.rs
@@ -0,0 +1,124 @@
+use super::LENGTH_BYTES;
+
+pub trait Encodable {
+    fn ssz_append(&self, s: &mut SszStream);
+}
+
+/// Provides a buffer for appending ssz-encodable values.
+///
+/// Use the `append()` fn to add a value to a list, then use
+/// the `drain()` method to consume the struct and return the
+/// ssz encoded bytes.
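+///
+/// A minimal usage sketch:
+///
+/// ```ignore
+/// let mut stream = SszStream::new();
+/// stream.append(&256_u16);
+/// assert_eq!(stream.drain(), vec![1, 0]); // a u16 encodes as two big-endian bytes
+/// ```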
+/// Provides a buffer for appending ssz-encodable values.
+///
+/// Use the `append()` fn to add a value to a list, then use
+/// the `drain()` method to consume the struct and return the
+/// ssz encoded bytes.
+#[derive(Default)]
+pub struct SszStream {
+    buffer: Vec<u8>,
+}
+
+impl SszStream {
+    /// Create a new, empty stream for writing ssz values.
+    pub fn new() -> Self {
+        SszStream { buffer: Vec::new() }
+    }
+
+    /// Append some ssz encodable value to the stream.
+    pub fn append<E>(&mut self, value: &E) -> &mut Self
+    where
+        E: Encodable,
+    {
+        value.ssz_append(self);
+        self
+    }
+
+    /// Append some ssz encoded bytes to the stream.
+    ///
+    /// The length of the supplied bytes will be concatenated
+    /// to the stream before the supplied bytes.
+    pub fn append_encoded_val(&mut self, vec: &[u8]) {
+        self.buffer
+            .extend_from_slice(&encode_length(vec.len(), LENGTH_BYTES));
+        self.buffer.extend_from_slice(vec);
+    }
+
+    /// Append some ssz encoded bytes to the stream without calculating a length.
+    ///
+    /// The raw bytes will be concatenated to the stream.
+    pub fn append_encoded_raw(&mut self, vec: &[u8]) {
+        self.buffer.extend_from_slice(vec);
+    }
+
+    /// Append some vector (list) of encodable values to the stream.
+    ///
+    /// The length of the list will be concatenated to the stream, then
+    /// each item in the vector will be encoded and concatenated.
+    pub fn append_vec<E>(&mut self, vec: &[E])
+    where
+        E: Encodable,
+    {
+        let mut list_stream = SszStream::new();
+        for item in vec {
+            item.ssz_append(&mut list_stream);
+        }
+        self.append_encoded_val(&list_stream.drain());
+    }
+
+    /// Consume the stream and return the underlying bytes.
+    pub fn drain(self) -> Vec<u8> {
+        self.buffer
+    }
+}
+
+/// Encode some length into a ssz size prefix.
+///
+/// The ssz size prefix is 4 bytes, which is treated as a
+/// 32-bit big-endian integer.
+pub fn encode_length(len: usize, length_bytes: usize) -> Vec<u8> {
+    assert!(length_bytes > 0); // For sanity
+    assert!(len < 2usize.pow(length_bytes as u32 * 8));
+    let mut header: Vec<u8> = vec![0; length_bytes];
+    for (i, header_byte) in header.iter_mut().enumerate() {
+        let offset = (length_bytes - i - 1) * 8;
+        *header_byte = ((len >> offset) & 0xff) as u8;
+    }
+    header
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[should_panic]
+    fn test_encode_length_0_bytes_panic() {
+        encode_length(0, 0);
+    }
+
+    #[test]
+    fn test_encode_length_4_bytes() {
+        assert_eq!(encode_length(0, LENGTH_BYTES), vec![0; 4]);
+        assert_eq!(encode_length(1, LENGTH_BYTES), vec![0, 0, 0, 1]);
+        assert_eq!(encode_length(255, LENGTH_BYTES), vec![0, 0, 0, 255]);
+        assert_eq!(encode_length(256, LENGTH_BYTES), vec![0, 0, 1, 0]);
+        assert_eq!(
+            encode_length(4294967295, LENGTH_BYTES), // 2^(4*8) - 1
+            vec![255, 255, 255, 255]
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_encode_length_4_bytes_panic() {
+        encode_length(4294967296, LENGTH_BYTES); // 2^(4*8)
+    }
+
+    #[test]
+    fn test_encode_list() {
+        let test_vec: Vec<u16> = vec![256; 12];
+        let mut stream = SszStream::new();
+        stream.append_vec(&test_vec);
+        let ssz = stream.drain();
+
+        assert_eq!(ssz.len(), 4 + (12 * 2));
+        assert_eq!(ssz[0..4], *vec![0, 0, 0, 24]);
+        assert_eq!(ssz[4..6], *vec![1, 0]);
+    }
+}
diff --git a/eth2/utils/ssz/src/impl_decode.rs b/eth2/utils/ssz/src/impl_decode.rs
new file mode 100644
index 000000000..134e438e1
--- /dev/null
+++ b/eth2/utils/ssz/src/impl_decode.rs
@@ -0,0 +1,218 @@
+use super::decode::decode_ssz_list;
+use super::ethereum_types::{Address, H256};
+use super::{Decodable, DecodeError};
+
+macro_rules!
impl_decodable_for_uint { + ($type: ident, $bit_size: expr) => { + impl Decodable for $type { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + assert!((0 < $bit_size) & ($bit_size <= 64) & ($bit_size % 8 == 0)); + let max_bytes = $bit_size / 8; + if bytes.len() >= (index + max_bytes) { + let end_bytes = index + max_bytes; + let mut result: $type = 0; + for (i, byte) in bytes.iter().enumerate().take(end_bytes).skip(index) { + let offset = (end_bytes - i - 1) * 8; + result |= ($type::from(*byte)) << offset; + } + Ok((result, end_bytes)) + } else { + Err(DecodeError::TooShort) + } + } + } + }; +} + +impl_decodable_for_uint!(u16, 16); +impl_decodable_for_uint!(u32, 32); +impl_decodable_for_uint!(u64, 64); +impl_decodable_for_uint!(usize, 64); + +impl Decodable for u8 { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + if index >= bytes.len() { + Err(DecodeError::TooShort) + } else { + Ok((bytes[index], index + 1)) + } + } +} + +impl Decodable for H256 { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + if bytes.len() < 32 || bytes.len() - 32 < index { + Err(DecodeError::TooShort) + } else { + Ok((H256::from(&bytes[index..(index + 32)]), index + 32)) + } + } +} + +impl Decodable for Address { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + if bytes.len() < 20 || bytes.len() - 20 < index { + Err(DecodeError::TooShort) + } else { + Ok((Address::from(&bytes[index..(index + 20)]), index + 20)) + } + } +} + +impl Decodable for Vec +where + T: Decodable, +{ + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + decode_ssz_list(bytes, index) + } +} + +#[cfg(test)] +mod tests { + use super::super::{decode_ssz, DecodeError}; + use super::*; + + #[test] + fn test_ssz_decode_h256() { + /* + * Input is exact length + */ + let input = vec![42_u8; 32]; + let (decoded, i) = H256::ssz_decode(&input, 0).unwrap(); + assert_eq!(decoded.to_vec(), input); + assert_eq!(i, 32); + + /* + * Input is too long + */ + let mut input = vec![42_u8; 32]; + input.push(12); + let (decoded, i) = H256::ssz_decode(&input, 0).unwrap(); + assert_eq!(decoded.to_vec()[..], input[0..32]); + assert_eq!(i, 32); + + /* + * Input is too short + */ + let input = vec![42_u8; 31]; + let res = H256::ssz_decode(&input, 0); + assert_eq!(res, Err(DecodeError::TooShort)); + } + + #[test] + fn test_ssz_decode_u16() { + let ssz = vec![0, 0]; + + let (result, index): (u16, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(result, 0); + assert_eq!(index, 2); + + let ssz = vec![0, 16]; + let (result, index): (u16, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(result, 16); + assert_eq!(index, 2); + + let ssz = vec![1, 0]; + let (result, index): (u16, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(result, 256); + assert_eq!(index, 2); + + let ssz = vec![255, 255]; + let (result, index): (u16, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 2); + assert_eq!(result, 65535); + + let ssz = vec![1]; + let result: Result<(u16, usize), DecodeError> = decode_ssz(&ssz, 0); + assert_eq!(result, Err(DecodeError::TooShort)); + } + + #[test] + fn test_ssz_decode_u32() { + let ssz = vec![0, 0, 0, 0]; + let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(result, 0); + assert_eq!(index, 4); + + let ssz = vec![0, 0, 1, 0]; + let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 4); + assert_eq!(result, 256); + + 
let ssz = vec![255, 255, 255, 0, 0, 1, 0]; + let (result, index): (u32, usize) = decode_ssz(&ssz, 3).unwrap(); + assert_eq!(index, 7); + assert_eq!(result, 256); + + let ssz = vec![0, 200, 1, 0]; + let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 4); + assert_eq!(result, 13107456); + + let ssz = vec![255, 255, 255, 255]; + let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 4); + assert_eq!(result, 4294967295); + + let ssz = vec![0, 0, 1]; + let result: Result<(u32, usize), DecodeError> = decode_ssz(&ssz, 0); + assert_eq!(result, Err(DecodeError::TooShort)); + } + + #[test] + fn test_ssz_decode_u64() { + let ssz = vec![0, 0, 0, 0, 0, 0, 0, 0]; + let (result, index): (u64, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 8); + assert_eq!(result, 0); + + let ssz = vec![255, 255, 255, 255, 255, 255, 255, 255]; + let (result, index): (u64, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 8); + assert_eq!(result, 18446744073709551615); + + let ssz = vec![0, 0, 8, 255, 0, 0, 0, 0, 0, 0, 0]; + let (result, index): (u64, usize) = decode_ssz(&ssz, 3).unwrap(); + assert_eq!(index, 11); + assert_eq!(result, 18374686479671623680); + + let ssz = vec![0, 0, 0, 0, 0, 0, 0]; + let result: Result<(u64, usize), DecodeError> = decode_ssz(&ssz, 0); + assert_eq!(result, Err(DecodeError::TooShort)); + } + + #[test] + fn test_ssz_decode_usize() { + let ssz = vec![0, 0, 0, 0, 0, 0, 0, 0]; + let (result, index): (usize, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 8); + assert_eq!(result, 0); + + let ssz = vec![0, 0, 8, 255, 255, 255, 255, 255, 255, 255, 255]; + let (result, index): (usize, usize) = decode_ssz(&ssz, 3).unwrap(); + assert_eq!(index, 11); + assert_eq!(result, 18446744073709551615); + + let ssz = vec![255, 255, 255, 255, 255, 255, 255, 255, 255]; + let (result, index): (usize, usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 8); + assert_eq!(result, 18446744073709551615); + + let ssz = vec![0, 0, 0, 0, 0, 0, 1]; + let result: Result<(usize, usize), DecodeError> = decode_ssz(&ssz, 0); + assert_eq!(result, Err(DecodeError::TooShort)); + } + + #[test] + fn test_decode_ssz_bounds() { + let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![1], 2); + assert_eq!(err, Err(DecodeError::TooShort)); + + let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![0, 0, 0, 0], 3); + assert_eq!(err, Err(DecodeError::TooShort)); + + let result: u16 = decode_ssz(&vec![0, 0, 0, 0, 1], 3).unwrap().0; + assert_eq!(result, 1); + } +} diff --git a/eth2/utils/ssz/src/impl_encode.rs b/eth2/utils/ssz/src/impl_encode.rs new file mode 100644 index 000000000..8714cf75f --- /dev/null +++ b/eth2/utils/ssz/src/impl_encode.rs @@ -0,0 +1,209 @@ +extern crate bytes; + +use self::bytes::{BufMut, BytesMut}; +use super::ethereum_types::{Address, H256}; +use super::{Encodable, SszStream}; + +/* + * Note: there is a "to_bytes" function for integers + * in Rust nightly. When it is in stable, we should + * use it instead. + */ +macro_rules! 
impl_encodable_for_uint { + ($type: ident, $bit_size: expr) => { + impl Encodable for $type { + #[allow(clippy::cast_lossless)] + fn ssz_append(&self, s: &mut SszStream) { + // Ensure bit size is valid + assert!( + (0 < $bit_size) + && ($bit_size % 8 == 0) + && (2_u128.pow($bit_size) > *self as u128) + ); + + // Serialize to bytes + let mut buf = BytesMut::with_capacity($bit_size / 8); + + // Match bit size with encoding + match $bit_size { + 8 => buf.put_u8(*self as u8), + 16 => buf.put_u16_be(*self as u16), + 32 => buf.put_u32_be(*self as u32), + 64 => buf.put_u64_be(*self as u64), + _ => {} + } + + // Append bytes to the SszStream + s.append_encoded_raw(&buf.to_vec()); + } + } + }; +} + +impl_encodable_for_uint!(u8, 8); +impl_encodable_for_uint!(u16, 16); +impl_encodable_for_uint!(u32, 32); +impl_encodable_for_uint!(u64, 64); +impl_encodable_for_uint!(usize, 64); + +impl Encodable for H256 { + fn ssz_append(&self, s: &mut SszStream) { + s.append_encoded_raw(&self.to_vec()); + } +} + +impl Encodable for Address { + fn ssz_append(&self, s: &mut SszStream) { + s.append_encoded_raw(&self.to_vec()); + } +} + +impl Encodable for Vec +where + T: Encodable, +{ + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(&self); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ssz_encode_h256() { + let h = H256::zero(); + let mut ssz = SszStream::new(); + ssz.append(&h); + assert_eq!(ssz.drain(), vec![0; 32]); + } + + #[test] + fn test_ssz_encode_address() { + let h = Address::zero(); + let mut ssz = SszStream::new(); + ssz.append(&h); + assert_eq!(ssz.drain(), vec![0; 20]); + } + + #[test] + fn test_ssz_encode_u8() { + let x: u8 = 0; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0]); + + let x: u8 = 1; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![1]); + + let x: u8 = 100; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![100]); + + let x: u8 = 255; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![255]); + } + + #[test] + fn test_ssz_encode_u16() { + let x: u16 = 1; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 1]); + + let x: u16 = 100; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 100]); + + let x: u16 = 1 << 8; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![1, 0]); + + let x: u16 = 65535; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![255, 255]); + } + + #[test] + fn test_ssz_encode_u32() { + let x: u32 = 1; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 0, 0, 1]); + + let x: u32 = 100; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 0, 0, 100]); + + let x: u32 = 1 << 16; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 1, 0, 0]); + + let x: u32 = 1 << 24; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![1, 0, 0, 0]); + + let x: u32 = !0; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![255, 255, 255, 255]); + } + + #[test] + fn test_ssz_encode_u64() { + let x: u64 = 1; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 0, 0, 0, 0, 0, 0, 1]); + + let x: u64 = 100; + let mut ssz = SszStream::new(); + ssz.append(&x); + assert_eq!(ssz.drain(), vec![0, 0, 0, 0, 0, 0, 0, 
+100]);
+
+        let x: u64 = 1 << 32;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![0, 0, 0, 1, 0, 0, 0, 0]);
+
+        let x: u64 = !0;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![255, 255, 255, 255, 255, 255, 255, 255]);
+    }
+
+    #[test]
+    fn test_ssz_encode_usize() {
+        let x: usize = 1;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![0, 0, 0, 0, 0, 0, 0, 1]);
+
+        let x: usize = 100;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![0, 0, 0, 0, 0, 0, 0, 100]);
+
+        let x: usize = 1 << 32;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![0, 0, 0, 1, 0, 0, 0, 0]);
+
+        let x: usize = !0;
+        let mut ssz = SszStream::new();
+        ssz.append(&x);
+        assert_eq!(ssz.drain(), vec![255, 255, 255, 255, 255, 255, 255, 255]);
+    }
+}
diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs
new file mode 100644
index 000000000..578977eec
--- /dev/null
+++ b/eth2/utils/ssz/src/impl_tree_hash.rs
@@ -0,0 +1,79 @@
+use super::ethereum_types::{Address, H256};
+use super::{merkle_hash, ssz_encode, TreeHash};
+use hashing::hash;
+
+impl TreeHash for u8 {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for u16 {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for u32 {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for u64 {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for usize {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for Address {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for H256 {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        ssz_encode(self)
+    }
+}
+
+impl TreeHash for [u8] {
+    fn hash_tree_root(&self) -> Vec<u8> {
+        if self.len() > 32 {
+            return hash(&self);
+        }
+        self.to_vec()
+    }
+}
+
+impl<T> TreeHash for Vec<T>
+where
+    T: TreeHash,
+{
+    /// Returns the merkle_hash of a list of hash_tree_root values created
+    /// from the given list.
+    /// Note: a byte vector, `Vec<u8>`, must be converted to a slice
+    /// (`as_slice()`) to be handled properly (i.e., hashed) as a byte array.
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut tree_hashes = self.iter().map(|x| x.hash_tree_root()).collect();
+        merkle_hash(&mut tree_hashes)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_impl_tree_hash_vec() {
+        let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root();
+        assert_eq!(result.len(), 32);
+    }
+}
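+
+// Editor's illustrative sketch, not part of the original patch: for a list,
+// `hash_tree_root` first roots each element, then merkleizes those roots:
+//
+//     let mut roots: Vec<Vec<u8>> = vec![1u64, 2, 3]
+//         .iter()
+//         .map(|x| x.hash_tree_root()) // each u64 root is its 8-byte big-endian encoding
+//         .collect();
+//     let root = merkle_hash(&mut roots); // always 32 bytes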
diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs
new file mode 100644
index 000000000..a6baa35a7
--- /dev/null
+++ b/eth2/utils/ssz/src/lib.rs
@@ -0,0 +1,38 @@
+/*
+ * This is a WIP of implementing an alternative
+ * serialization strategy. It attempts to follow Vitalik's
+ * "simpleserialize" format here:
+ * https://github.com/ethereum/beacon_chain/blob/master/beacon_chain/utils/simpleserialize.py
+ *
+ * This implementation is not final and would almost certainly
+ * have issues.
+ */
+extern crate bytes;
+extern crate ethereum_types;
+
+pub mod decode;
+pub mod encode;
+pub mod tree_hash;
+
+mod impl_decode;
+mod impl_encode;
+mod impl_tree_hash;
+
+pub use crate::decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError};
+pub use crate::encode::{Encodable, SszStream};
+pub use crate::tree_hash::{merkle_hash, TreeHash};
+
+pub use hashing::hash;
+
+pub const LENGTH_BYTES: usize = 4;
+pub const MAX_LIST_SIZE: usize = 1 << (4 * 8);
+
+/// Convenience function to SSZ encode an object supporting ssz::Encodable.
+pub fn ssz_encode<T>(val: &T) -> Vec<u8>
+where
+    T: Encodable,
+{
+    let mut ssz_stream = SszStream::new();
+    ssz_stream.append(val);
+    ssz_stream.drain()
+}
diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs
new file mode 100644
index 000000000..a9ab0f467
--- /dev/null
+++ b/eth2/utils/ssz/src/tree_hash.rs
@@ -0,0 +1,83 @@
+use hashing::hash;
+
+const SSZ_CHUNK_SIZE: usize = 128;
+const HASHSIZE: usize = 32;
+
+pub trait TreeHash {
+    fn hash_tree_root(&self) -> Vec<u8>;
+}
+
+/// Returns a 32 byte hash of `list`, a vector of byte vectors.
+/// Note that this will consume `list`.
+pub fn merkle_hash(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
+    // Flatten the list.
+    let (mut chunk_size, mut chunkz) = list_to_blob(list);
+
+    // Get data_len as bytes; it will be hashed with the merkle root.
+    let datalen = list.len().to_le_bytes();
+
+    // Tree-hash
+    while chunkz.len() > HASHSIZE {
+        let mut new_chunkz: Vec<u8> = Vec::new();
+
+        for two_chunks in chunkz.chunks(chunk_size * 2) {
+            if two_chunks.len() == chunk_size {
+                // Odd number of chunks
+                let mut c = two_chunks.to_vec();
+                c.append(&mut vec![0; SSZ_CHUNK_SIZE]);
+                new_chunkz.append(&mut hash(&c));
+            } else {
+                // Hash two chunks together
+                new_chunkz.append(&mut hash(two_chunks));
+            }
+        }
+
+        chunk_size = HASHSIZE;
+        chunkz = new_chunkz;
+    }
+
+    chunkz.append(&mut datalen.to_vec());
+    hash(&chunkz)
+}
+
+fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) {
+    let chunk_size = if list.is_empty() {
+        SSZ_CHUNK_SIZE
+    } else if list[0].len() < SSZ_CHUNK_SIZE {
+        let items_per_chunk = SSZ_CHUNK_SIZE / list[0].len();
+        items_per_chunk * list[0].len()
+    } else {
+        list[0].len()
+    };
+
+    let mut data = Vec::new();
+    if list.is_empty() {
+        // Handle an empty list.
+        data.append(&mut vec![0; SSZ_CHUNK_SIZE]);
+    } else {
+        // Just create a blob here; we'll divide it into
+        // chunked slices when we merklize.
+        data.reserve(list[0].len() * list.len());
+        for item in list.iter_mut() {
+            data.append(item);
+        }
+    }
+    (chunk_size, data)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_merkle_hash() {
+        let data1 = vec![1; 100];
+        let data2 = vec![2; 100];
+        let data3 = vec![3; 100];
+        let mut list = vec![data1, data2, data3];
+        let result = merkle_hash(&mut list);
+
+        // Note: this should be tested against a known hash value.
+        assert_eq!(HASHSIZE, result.len());
+    }
+}
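+
+// Editor's worked example, not part of the original patch: for three 100-byte
+// items (as in the test above), `list_to_blob` computes items_per_chunk =
+// SSZ_CHUNK_SIZE / 100 = 1, so chunk_size = 100 and the flattened blob is 300
+// bytes. The first pass hashes one 200-byte (two-chunk) window, then
+// zero-pads the odd trailing 100-byte chunk to SSZ_CHUNK_SIZE before hashing.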
diff --git a/eth2/utils/vec_shuffle/Cargo.toml b/eth2/utils/vec_shuffle/Cargo.toml
new file mode 100644
index 000000000..aaeb50074
--- /dev/null
+++ b/eth2/utils/vec_shuffle/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "vec_shuffle"
+version = "0.1.0"
+authors = ["Paul Hauner"]
+edition = "2018"
+
+[dependencies]
+hashing = { path = "../hashing" }
+
+[dev-dependencies]
+yaml-rust = "0.4.2"
diff --git a/eth2/utils/vec_shuffle/src/lib.rs b/eth2/utils/vec_shuffle/src/lib.rs
new file mode 100644
index 000000000..78bb8aa10
--- /dev/null
+++ b/eth2/utils/vec_shuffle/src/lib.rs
@@ -0,0 +1,81 @@
+/// A library for performing deterministic, pseudo-random shuffling of a vector.
+///
+/// This library is designed to conform to the Ethereum 2.0 specification.
+extern crate hashing;
+
+mod rng;
+
+use self::rng::ShuffleRng;
+
+#[derive(Debug)]
+pub enum ShuffleErr {
+    ExceedsListLength,
+}
+
+/// Performs a deterministic, in-place shuffle of a vector.
+///
+/// The final order of the shuffle is determined by successive hashes
+/// of the supplied `seed`.
+///
+/// This is a Fisher-Yates-Durstenfeld shuffle.
+pub fn shuffle<T>(seed: &[u8], mut list: Vec<T>) -> Result<Vec<T>, ShuffleErr> {
+    let mut rng = ShuffleRng::new(seed);
+
+    if list.len() > rng.rand_max as usize {
+        return Err(ShuffleErr::ExceedsListLength);
+    }
+
+    if list.is_empty() {
+        return Ok(list);
+    }
+
+    for i in 0..(list.len() - 1) {
+        let n = list.len() - i;
+        let j = rng.rand_range(n as u32) as usize + i;
+        list.swap(i, j);
+    }
+    Ok(list)
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate yaml_rust;
+
+    use self::yaml_rust::yaml;
+
+    use std::{fs::File, io::prelude::*, path::PathBuf};
+
+    use super::{hashing::hash, *};
+
+    #[test]
+    fn test_shuffling() {
+        let mut file = {
+            let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+            file_path_buf.push("src/specs/shuffle_test_vectors.yaml");
+
+            File::open(file_path_buf).unwrap()
+        };
+
+        let mut yaml_str = String::new();
+
+        file.read_to_string(&mut yaml_str).unwrap();
+
+        let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap();
+        let doc = &docs[0];
+        let test_cases = doc["test_cases"].as_vec().unwrap();
+
+        for test_case in test_cases {
+            let input = test_case["input"].clone().into_vec().unwrap();
+            let output = test_case["output"].clone().into_vec().unwrap();
+            let seed_bytes = test_case["seed"].as_str().unwrap().as_bytes();
+
+            let seed = if !seed_bytes.is_empty() {
+                hash(seed_bytes)
+            } else {
+                vec![]
+            };
+
+            assert_eq!(shuffle(&seed, input).unwrap(), output);
+        }
+    }
+}
diff --git a/eth2/utils/vec_shuffle/src/rng.rs b/eth2/utils/vec_shuffle/src/rng.rs
new file mode 100644
index 000000000..7a4a785ff
--- /dev/null
+++ b/eth2/utils/vec_shuffle/src/rng.rs
@@ -0,0 +1,90 @@
+use super::hashing::hash;
+
+const SEED_SIZE_BYTES: usize = 32;
+const RAND_BYTES: usize = 3; // 24 / 8
+const RAND_MAX: u32 = 16_777_215; // 2 ** (rand_bytes * 8) - 1
+
+/// A pseudo-random number generator which, given a seed,
+/// uses successive blake2s hashing to generate "entropy".
+pub struct ShuffleRng {
+    seed: Vec<u8>,
+    idx: usize,
+    pub rand_max: u32,
+}
+
+impl ShuffleRng {
+    /// Create a new instance given some "seed" bytes.
+    pub fn new(initial_seed: &[u8]) -> Self {
+        Self {
+            seed: hash(initial_seed),
+            idx: 0,
+            rand_max: RAND_MAX,
+        }
+    }
+
+    /// "Regenerates" the seed by hashing it.
+    fn rehash_seed(&mut self) {
+        self.seed = hash(&self.seed);
+        self.idx = 0;
+    }
+
+    /// Extracts 3 bytes from the `seed`. Rehashes the seed if required.
+    fn rand(&mut self) -> u32 {
+        self.idx += RAND_BYTES;
+        if self.idx >= SEED_SIZE_BYTES {
+            self.rehash_seed();
+            self.rand()
+        } else {
+            int_from_byte_slice(&self.seed, self.idx - RAND_BYTES)
+        }
+    }
+
+    /// Generate a random u32 below the specified maximum `n`.
+    ///
+    /// Provides a filtered result from a higher-level rng, by discarding
+    /// results which may bias the output. Because of this, execution time is
+    /// not linear and may potentially be infinite.
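+    ///
+    /// (Editor's illustrative note, not in the original patch.) For example,
+    /// with `n = 10` any raw 24-bit sample at or above
+    /// `rand_max - (rand_max % 10) = 16_777_210` is redrawn, leaving a kept
+    /// range that divides evenly by 10, so `x % n` is unbiased.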
+ pub fn rand_range(&mut self, n: u32) -> u32 { + assert!(n < RAND_MAX, "RAND_MAX exceed"); + let mut x = self.rand(); + while x >= self.rand_max - (self.rand_max % n) { + x = self.rand(); + } + x % n + } +} + +/// Reads the next three bytes of `source`, starting from `offset` and +/// interprets those bytes as a 24 bit big-endian integer. +/// Returns that integer. +fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 { + (u32::from(source[offset + 2])) + | (u32::from(source[offset + 1]) << 8) + | (u32::from(source[offset]) << 16) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_shuffling_int_from_slice() { + let mut x = int_from_byte_slice(&[0, 0, 1], 0); + assert_eq!((x as u32), 1); + + x = int_from_byte_slice(&[0, 1, 1], 0); + assert_eq!(x, 257); + + x = int_from_byte_slice(&[1, 1, 1], 0); + assert_eq!(x, 65793); + + x = int_from_byte_slice(&[255, 1, 1], 0); + assert_eq!(x, 16711937); + + x = int_from_byte_slice(&[255, 255, 255], 0); + assert_eq!(x, 16777215); + + x = int_from_byte_slice(&[0x8f, 0xbb, 0xc7], 0); + assert_eq!(x, 9419719); + } +} diff --git a/eth2/utils/vec_shuffle/src/specs/shuffle_test_vectors.yaml b/eth2/utils/vec_shuffle/src/specs/shuffle_test_vectors.yaml new file mode 100644 index 000000000..2571f0804 --- /dev/null +++ b/eth2/utils/vec_shuffle/src/specs/shuffle_test_vectors.yaml @@ -0,0 +1,131 @@ +title: Shuffling Algorithm Tests +summary: Test vectors for shuffling a list based upon a seed. +test_suite: Shuffling + +test_cases: +- input: [] + output: [] + seed: '' +- input: [0] + output: [0] + seed: '' +- input: [255] + output: [255] + seed: '' +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 1, 1, 5, 6, 6, 6, 2, 4, 4] + seed: '' +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [4, 9, 6, 8, 13, 3, 2, 11, 5, 1, 12, 7, 10] + seed: '' +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 1, 1, 5, 6, 6, 6, 2, 4, 65] + seed: '' +- input: [] + output: [] + seed: 4kn4driuctg8 +- input: [0] + output: [0] + seed: 4kn4driuctg8 +- input: [255] + output: [255] + seed: 4kn4driuctg8 +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 4, 4, 2, 1, 1, 6, 5, 6, 6] + seed: 4kn4driuctg8 +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [7, 6, 3, 12, 11, 1, 8, 13, 10, 5, 9, 4, 2] + seed: 4kn4driuctg8 +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 4, 65, 2, 1, 1, 6, 5, 6, 6] + seed: 4kn4driuctg8 +- input: [] + output: [] + seed: ytre1p +- input: [0] + output: [0] + seed: ytre1p +- input: [255] + output: [255] + seed: ytre1p +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 1, 1, 5, 6, 2, 6, 2, 4, 4] + seed: ytre1p +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [6, 2, 3, 4, 8, 5, 12, 9, 7, 11, 10, 1, 13] + seed: ytre1p +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 1, 1, 5, 6, 2, 6, 2, 4, 65] + seed: ytre1p +- input: [] + output: [] + seed: mytobcffnkvj +- input: [0] + output: [0] + seed: mytobcffnkvj +- input: [255] + output: [255] + seed: mytobcffnkvj +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 4, 1, 1, 6, 4, 6, 5, 6, 2] + seed: mytobcffnkvj +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [11, 5, 9, 7, 2, 4, 12, 10, 8, 1, 6, 3, 13] + seed: mytobcffnkvj +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 65, 1, 1, 6, 4, 6, 5, 6, 2] + seed: mytobcffnkvj +- input: [] + output: [] + seed: myzu3g7evxp5nkvj +- input: [0] + output: [0] + seed: myzu3g7evxp5nkvj +- input: [255] + output: [255] + seed: myzu3g7evxp5nkvj +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 
5] + output: [6, 2, 1, 4, 2, 6, 5, 6, 4, 1] + seed: myzu3g7evxp5nkvj +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [2, 1, 11, 3, 9, 7, 8, 13, 4, 10, 5, 6, 12] + seed: myzu3g7evxp5nkvj +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 1, 4, 2, 6, 5, 6, 65, 1] + seed: myzu3g7evxp5nkvj +- input: [] + output: [] + seed: xdpli1jsx5xb +- input: [0] + output: [0] + seed: xdpli1jsx5xb +- input: [255] + output: [255] + seed: xdpli1jsx5xb +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 1, 2, 4, 6, 6, 5, 6, 1, 4] + seed: xdpli1jsx5xb +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [5, 8, 12, 9, 11, 4, 7, 13, 1, 3, 2, 10, 6] + seed: xdpli1jsx5xb +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 1, 2, 65, 6, 6, 5, 6, 1, 4] + seed: xdpli1jsx5xb +- input: [] + output: [] + seed: oab3mbb3xe8qsx5xb +- input: [0] + output: [0] + seed: oab3mbb3xe8qsx5xb +- input: [255] + output: [255] + seed: oab3mbb3xe8qsx5xb +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 1, 1, 6, 2, 4, 4, 6, 5] + seed: oab3mbb3xe8qsx5xb +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [1, 8, 5, 13, 2, 10, 7, 11, 12, 6, 3, 4, 9] + seed: oab3mbb3xe8qsx5xb +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 1, 1, 6, 2, 4, 65, 6, 5] + seed: oab3mbb3xe8qsx5xb diff --git a/eth2/validator_change/Cargo.toml b/eth2/validator_change/Cargo.toml new file mode 100644 index 000000000..a1c499340 --- /dev/null +++ b/eth2/validator_change/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "validator_change" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +bytes = "0.4.10" +hashing = { path = "../utils/hashing" } +ssz = { path = "../utils/ssz" } +types = { path = "../types" } diff --git a/eth2/validator_change/src/lib.rs b/eth2/validator_change/src/lib.rs new file mode 100644 index 000000000..7c13b168a --- /dev/null +++ b/eth2/validator_change/src/lib.rs @@ -0,0 +1,142 @@ +extern crate bytes; +extern crate hashing; +extern crate types; + +use bytes::{BufMut, BytesMut}; +use hashing::canonical_hash; +use ssz::ssz_encode; +use std::cmp::max; +use types::{Hash256, ValidatorRecord, ValidatorStatus}; + +pub enum UpdateValidatorSetError { + ArithmeticOverflow, +} + +const VALIDATOR_FLAG_ENTRY: u8 = 0; +const VALIDATOR_FLAG_EXIT: u8 = 1; + +pub fn update_validator_set( + validators: &mut Vec, + hash_chain: Hash256, + present_slot: u64, + deposit_size_gwei: u64, + max_validator_churn_quotient: u64, +) -> Result<(), UpdateValidatorSetError> { + /* + * Total balance of all active validators. + * + * Return an error if an overflow occurs. + */ + let total_balance = { + let mut bal: u64 = 0; + for v in validators.iter() { + if v.status_is(ValidatorStatus::Active) { + bal = bal + .checked_add(v.balance) + .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; + } + } + bal + }; + + /* + * Note: this is not the maximum allowable change, it can actually be higher. + */ + let max_allowable_change = { + let double_deposit_size = deposit_size_gwei + .checked_mul(2) + .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; + max( + double_deposit_size, + total_balance / max_validator_churn_quotient, + ) + }; + + let mut hasher = ValidatorChangeHashChain { + bytes: hash_chain.to_vec(), + }; + let mut total_changed: u64 = 0; + for (i, v) in validators.iter_mut().enumerate() { + match v.status { + /* + * Validator is pending activiation. 
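+             * (Editor's note, not in the original patch.) Activations are
+             * throttled below: each one adds `deposit_size_gwei` to
+             * `total_changed`, and the loop stops once `max_allowable_change`
+             * would be exceeded.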
+ */ + ValidatorStatus::PendingActivation => { + let new_total_changed = total_changed + .checked_add(deposit_size_gwei) + .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; + /* + * If entering this validator would not exceed the max balance delta, + * activate the validator. + */ + if new_total_changed <= max_allowable_change { + v.status = ValidatorStatus::Active; + hasher.extend(i, &ssz_encode(&v.pubkey), VALIDATOR_FLAG_ENTRY); + total_changed = new_total_changed; + } else { + // Entering the validator would exceed the balance delta. + break; + } + } + /* + * Validator is pending exit. + */ + ValidatorStatus::PendingExit => { + let new_total_changed = total_changed + .checked_add(v.balance) + .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; + /* + * If exiting this validator would not exceed the max balance delta, + * exit the validator + */ + if new_total_changed <= max_allowable_change { + v.status = ValidatorStatus::PendingWithdraw; + v.exit_slot = present_slot; + hasher.extend(i, &ssz_encode(&v.pubkey), VALIDATOR_FLAG_EXIT); + total_changed = new_total_changed; + } else { + // Exiting the validator would exceed the balance delta. + break; + } + } + _ => (), + }; + if total_changed >= max_allowable_change { + break; + } + } + Ok(()) +} + +pub struct ValidatorChangeHashChain { + bytes: Vec, +} + +impl ValidatorChangeHashChain { + pub fn extend(&mut self, index: usize, pubkey: &Vec, flag: u8) { + let mut message = self.bytes.clone(); + message.append(&mut serialize_validator_change_record(index, pubkey, flag)); + self.bytes = canonical_hash(&message); + } +} + +fn serialize_validator_change_record(index: usize, pubkey: &Vec, flag: u8) -> Vec { + let mut buf = BytesMut::with_capacity(68); + buf.put_u8(flag); + let index_bytes = { + let mut buf = BytesMut::with_capacity(8); + buf.put_u64_be(index as u64); + buf.take()[8 - 3..8].to_vec() + }; + buf.put(index_bytes); + buf.put(pubkey); + buf.take().to_vec() +} + +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} diff --git a/lighthouse-beacon b/lighthouse-beacon deleted file mode 160000 index 101f450e8..000000000 --- a/lighthouse-beacon +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 101f450e81bc44641409ffab566f7c5f680dae3d diff --git a/lighthouse-libs b/lighthouse-libs deleted file mode 160000 index 38f70a390..000000000 --- a/lighthouse-libs +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 38f70a390df54c0afb569b1cc50f0c95c5651cb1 diff --git a/lighthouse-validator b/lighthouse-validator deleted file mode 160000 index c72981086..000000000 --- a/lighthouse-validator +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c72981086638c784138905dce755c87cfd03f5aa diff --git a/protos/.gitignore b/protos/.gitignore new file mode 100644 index 000000000..7104339d9 --- /dev/null +++ b/protos/.gitignore @@ -0,0 +1,2 @@ +src/services.rs +src/services_grpc.rs diff --git a/protos/Cargo.toml b/protos/Cargo.toml new file mode 100644 index 000000000..56364d188 --- /dev/null +++ b/protos/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "protos" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" +description = "Google protobuf message and service definitions used in Lighthouse APIs." 
+
+[dependencies]
+futures = "0.1"
+grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
+protobuf = "2.0"
+
+[build-dependencies]
+protoc-grpcio = "0.3.1"
diff --git a/protos/build.rs b/protos/build.rs
new file mode 100644
index 000000000..108d9e2dd
--- /dev/null
+++ b/protos/build.rs
@@ -0,0 +1,10 @@
+extern crate protoc_grpcio;
+
+use std::path::Path;
+
+fn main() {
+    let proto_root = Path::new("src");
+    println!("cargo:rerun-if-changed={}", proto_root.display());
+    protoc_grpcio::compile_grpc_protos(&["services.proto"], &[proto_root], &proto_root)
+        .expect("Failed to compile gRPC definitions!");
+}
diff --git a/protos/src/lib.rs b/protos/src/lib.rs
new file mode 100644
index 000000000..2759263e7
--- /dev/null
+++ b/protos/src/lib.rs
@@ -0,0 +1,5 @@
+// The protobuf code-generator is not up-to-date with clippy, therefore we silence some warnings.
+#[allow(renamed_and_removed_lints)]
+pub mod services;
+#[allow(renamed_and_removed_lints)]
+pub mod services_grpc;
diff --git a/protos/src/services.proto b/protos/src/services.proto
new file mode 100644
index 000000000..16e2d4dba
--- /dev/null
+++ b/protos/src/services.proto
@@ -0,0 +1,94 @@
+// TODO: This setup requires that the BN (beacon node) holds the block in state
+// during the interval between the `GenerateProposalRequest` and the
+// `SubmitProposalRequest`.
+//
+// This is sub-optimal because, if a validator client switches BNs during this
+// process, the block will be lost.
+//
+// This "stateful" method is being used presently because it's easier and
+// requires less maintenance as the `BeaconBlock` definition changes.
+
+syntax = "proto3";
+
+package ethereum.beacon.rpc.v1;
+
+service BeaconBlockService {
+  rpc ProduceBeaconBlock(ProduceBeaconBlockRequest) returns (ProduceBeaconBlockResponse);
+  rpc PublishBeaconBlock(PublishBeaconBlockRequest) returns (PublishBeaconBlockResponse);
+}
+
+service ValidatorService {
+  // rpc ValidatorAssignment(ValidatorAssignmentRequest) returns (ValidatorAssignmentResponse);
+  rpc ProposeBlockSlot(ProposeBlockSlotRequest) returns (ProposeBlockSlotResponse);
+  rpc ValidatorIndex(PublicKey) returns (IndexResponse);
+}
+
+message BeaconBlock {
+  uint64 slot = 1;
+  bytes block_root = 2;
+  bytes randao_reveal = 3;
+  bytes signature = 4;
+}
+
+// Validator requests an unsigned proposal.
+message ProduceBeaconBlockRequest {
+  uint64 slot = 1;
+}
+
+// Beacon node returns an unsigned proposal.
+message ProduceBeaconBlockResponse {
+  BeaconBlock block = 1;
+}
+
+// Validator submits a signed proposal.
+message PublishBeaconBlockRequest {
+  BeaconBlock block = 1;
+}
+
+// Beacon node indicates a successfully submitted proposal.
+message PublishBeaconBlockResponse {
+  bool success = 1;
+  bytes msg = 2;
+}
+
+// A validator's duties for some epoch.
+// TODO: add shard duties.
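+// (Editor's note, not part of the original patch.) proto3 scalar fields
+// cannot be null, so the oneof below encodes an *optional* slot: either
+// `block_production_slot_none = true` or a concrete `block_production_slot`.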
+message ValidatorAssignment {
+  oneof block_production_slot_oneof {
+    bool block_production_slot_none = 1;
+    uint64 block_production_slot = 2;
+  }
+}
+
+message ValidatorAssignmentRequest {
+  uint64 epoch = 1;
+  bytes validator_index = 2;
+}
+
+/*
+ * Propose slot
+ */
+
+message ProposeBlockSlotRequest {
+  uint64 epoch = 1;
+  uint64 validator_index = 2;
+}
+
+message ProposeBlockSlotResponse {
+  oneof slot_oneof {
+    bool none = 1;
+    uint64 slot = 2;
+  }
+}
+
+/*
+ * Validator Assignment
+ */
+
+message PublicKey {
+  bytes public_key = 1;
+}
+
+message IndexResponse {
+  uint64 index = 1;
+}
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml
new file mode 100644
index 000000000..8ab515e15
--- /dev/null
+++ b/validator_client/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "validator_client"
+version = "0.1.0"
+authors = ["Paul Hauner"]
+edition = "2018"
+
+[dependencies]
+block_producer = { path = "../eth2/block_producer" }
+bls = { path = "../eth2/utils/bls" }
+clap = "2.32.0"
+dirs = "1.0.3"
+grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
+protobuf = "2.0.2"
+protos = { path = "../protos" }
+slot_clock = { path = "../eth2/utils/slot_clock" }
+types = { path = "../eth2/types" }
+slog = "^2.2.3"
+slog-term = "^2.4.0"
+slog-async = "^2.3.0"
+ssz = { path = "../eth2/utils/ssz" }
diff --git a/validator_client/README.md b/validator_client/README.md
new file mode 100644
index 000000000..aa84fe013
--- /dev/null
+++ b/validator_client/README.md
@@ -0,0 +1,67 @@
+# Lighthouse Validator Client
+
+The Validator Client (VC) is a stand-alone binary which connects to a Beacon
+Node (BN) and fulfils the roles of a validator.
+
+## Roles
+
+The VC is responsible for the following tasks:
+
+- Requesting validator duties (a.k.a. shuffling) from the BN.
+- Prompting the BN to produce a new block when a validator's block-production
+  duties require it.
+- Completing all the fields on a new block (e.g., RANDAO reveal, signature) and
+  publishing the block to a BN.
+- Prompting the BN to produce a new shard attestation, as per a validator's
+  duties.
+- Ensuring that no slashable messages are signed by a validator's private key.
+- Keeping track of the system clock and how it relates to slots/epochs.
+
+The VC is capable of managing multiple validators in the same process tree.
+
+## Implementation
+
+_This section describes the present implementation of this VC binary._
+
+### Services
+
+Each validator is represented by two services: one which tracks the validator's
+duties and another which performs block production duties.
+
+A separate thread is maintained for each service, for each validator. As such,
+a single validator utilises three (3) threads (one for the base VC and one for
+each of the two services) and two validators utilise five (5) threads.
+
+#### `DutiesManagerService`
+
+Polls a BN and requests validator responsibilities, as well as a validator
+index. The outcome of a successful poll is an `EpochDuties` struct:
+
+```rust
+EpochDuties {
+    validator_index: u64,
+    block_production_slot: u64,
+}
+```
+
+This is stored in the `EpochDutiesMap`, a `HashMap` mapping `epoch ->
+EpochDuties`.
+
+#### `BlockProducerService`
+
+Polls the system clock and determines if a block needs to be produced. Reads
+from the `EpochDutiesMap` maintained by the `DutiesManagerService`.
+
+If block production is required, performs all the necessary duties to request,
+complete and return a block from the BN.
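+
+Both services follow the same poll-and-sleep pattern. As a rough sketch
+(editor's illustration; names here are indicative, not the exact internal
+API):
+
+```rust
+loop {
+    match service.poll() {
+        Ok(outcome) => info!(log, "Poll outcome"; "outcome" => format!("{:?}", outcome)),
+        Err(e) => error!(log, "Poll error"; "error" => format!("{:?}", e)),
+    }
+    std::thread::sleep(poll_interval);
+}
+```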
+
+### Configuration
+
+Presently, the validator specifics (pubkey, etc.) are randomly generated and
+the chain specification (slot length, BLS domain, etc.) is fixed to foundation
+parameters. This is temporary and will be upgraded so these parameters can be
+read from a file (or initialized on first boot).
+
+## BN Communication
+
+The VC communicates with the BN via a gRPC/protobuf connection.
diff --git a/validator_client/src/block_producer_service/beacon_block_grpc_client.rs b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs
new file mode 100644
index 000000000..39ef7fcdc
--- /dev/null
+++ b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs
@@ -0,0 +1,102 @@
+use block_producer::{BeaconNode, BeaconNodeError, PublishOutcome};
+use protos::services::{
+    BeaconBlock as GrpcBeaconBlock, ProduceBeaconBlockRequest, PublishBeaconBlockRequest,
+};
+use protos::services_grpc::BeaconBlockServiceClient;
+use ssz::{ssz_encode, Decodable};
+use std::sync::Arc;
+use types::{BeaconBlock, BeaconBlockBody, Eth1Data, Hash256, Signature, Slot};
+
+/// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be
+/// implemented upon it.
+pub struct BeaconBlockGrpcClient {
+    client: Arc<BeaconBlockServiceClient>,
+}
+
+impl BeaconBlockGrpcClient {
+    pub fn new(client: Arc<BeaconBlockServiceClient>) -> Self {
+        Self { client }
+    }
+}
+
+impl BeaconNode for BeaconBlockGrpcClient {
+    /// Request a Beacon Node (BN) to produce a new block at the supplied slot.
+    ///
+    /// Returns `None` if it is not possible to produce at the supplied slot. For example, if the
+    /// BN is unable to find a parent block.
+    fn produce_beacon_block(
+        &self,
+        slot: Slot,
+        // TODO: use randao_reveal, when proto APIs have been updated.
+        _randao_reveal: &Signature,
+    ) -> Result<Option<BeaconBlock>, BeaconNodeError> {
+        let mut req = ProduceBeaconBlockRequest::new();
+        req.set_slot(slot.as_u64());
+
+        let reply = self
+            .client
+            .produce_beacon_block(&req)
+            .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
+
+        if reply.has_block() {
+            let block = reply.get_block();
+
+            let (signature, _) = Signature::ssz_decode(block.get_signature(), 0)
+                .map_err(|_| BeaconNodeError::DecodeFailure)?;
+
+            let (randao_reveal, _) = Signature::ssz_decode(block.get_randao_reveal(), 0)
+                .map_err(|_| BeaconNodeError::DecodeFailure)?;
+
+            // TODO: this conversion is incomplete; fix it.
+            Ok(Some(BeaconBlock {
+                slot: Slot::new(block.get_slot()),
+                parent_root: Hash256::zero(),
+                state_root: Hash256::zero(),
+                randao_reveal,
+                eth1_data: Eth1Data {
+                    deposit_root: Hash256::zero(),
+                    block_hash: Hash256::zero(),
+                },
+                signature,
+                body: BeaconBlockBody {
+                    proposer_slashings: vec![],
+                    attester_slashings: vec![],
+                    attestations: vec![],
+                    deposits: vec![],
+                    exits: vec![],
+                },
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Request a Beacon Node (BN) to publish a block.
+    ///
+    /// Generally, this will be called after a `produce_beacon_block` call with a block that has
+    /// been completed (signed) by the validator client.
+    fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError> {
+        let mut req = PublishBeaconBlockRequest::new();
+
+        // TODO: this conversion is incomplete; fix it.
+ let mut grpc_block = GrpcBeaconBlock::new(); + grpc_block.set_slot(block.slot.as_u64()); + grpc_block.set_block_root(vec![0]); + grpc_block.set_randao_reveal(ssz_encode(&block.randao_reveal)); + grpc_block.set_signature(ssz_encode(&block.signature)); + + req.set_block(grpc_block); + + let reply = self + .client + .publish_beacon_block(&req) + .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?; + + if reply.get_success() { + Ok(PublishOutcome::ValidBlock) + } else { + // TODO: distinguish between different errors + Ok(PublishOutcome::InvalidBlock("Publish failed".to_string())) + } + } +} diff --git a/validator_client/src/block_producer_service/mod.rs b/validator_client/src/block_producer_service/mod.rs new file mode 100644 index 000000000..82c3f2537 --- /dev/null +++ b/validator_client/src/block_producer_service/mod.rs @@ -0,0 +1,58 @@ +mod beacon_block_grpc_client; +// mod block_producer_service; + +use block_producer::{ + BeaconNode, BlockProducer, DutiesReader, PollOutcome as BlockProducerPollOutcome, Signer, +}; +use slog::{error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::time::Duration; + +pub use self::beacon_block_grpc_client::BeaconBlockGrpcClient; + +pub struct BlockProducerService { + pub block_producer: BlockProducer, + pub poll_interval_millis: u64, + pub log: Logger, +} + +impl BlockProducerService { + /// Run a loop which polls the block producer each `poll_interval_millis` millseconds. + /// + /// Logs the results of the polls. + pub fn run(&mut self) { + loop { + match self.block_producer.poll() { + Err(error) => { + error!(self.log, "Block producer poll error"; "error" => format!("{:?}", error)) + } + Ok(BlockProducerPollOutcome::BlockProduced(slot)) => { + info!(self.log, "Produced block"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::SlashableBlockNotProduced(slot)) => { + warn!(self.log, "Slashable block was not signed"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::BlockProductionNotRequired(slot)) => { + info!(self.log, "Block production not required"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::ProducerDutiesUnknown(slot)) => { + error!(self.log, "Block production duties unknown"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::SlotAlreadyProcessed(slot)) => { + warn!(self.log, "Attempted to re-process slot"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::BeaconNodeUnableToProduceBlock(slot)) => { + error!(self.log, "Beacon node unable to produce block"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::SignerRejection(slot)) => { + error!(self.log, "The cryptographic signer refused to sign the block"; "slot" => slot) + } + Ok(BlockProducerPollOutcome::ValidatorIsUnknown(slot)) => { + error!(self.log, "The Beacon Node does not recognise the validator"; "slot" => slot) + } + }; + + std::thread::sleep(Duration::from_millis(self.poll_interval_millis)); + } + } +} diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs new file mode 100644 index 000000000..104a4bbe6 --- /dev/null +++ b/validator_client/src/config.rs @@ -0,0 +1,25 @@ +use std::fs; +use std::path::PathBuf; + +/// Stores the core configuration for this validator instance. +#[derive(Clone)] +pub struct ClientConfig { + pub data_dir: PathBuf, + pub server: String, +} + +const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse-validators"; + +impl ClientConfig { + /// Build a new configuration from defaults. 
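+    ///
+    /// (Editor's note, describing the body below.) Data defaults to
+    /// `$HOME/.lighthouse-validators` (created if missing) and the Beacon
+    /// Node address defaults to `localhost:50051`.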
+    pub fn default() -> Self {
+        let data_dir = {
+            let home = dirs::home_dir().expect("Unable to determine home dir.");
+            home.join(DEFAULT_LIGHTHOUSE_DIR)
+        };
+        fs::create_dir_all(&data_dir)
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
+        let server = "localhost:50051".to_string();
+        Self { data_dir, server }
+    }
+}
diff --git a/validator_client/src/duties/epoch_duties.rs b/validator_client/src/duties/epoch_duties.rs
new file mode 100644
index 000000000..b555eee28
--- /dev/null
+++ b/validator_client/src/duties/epoch_duties.rs
@@ -0,0 +1,80 @@
+use block_producer::{DutiesReader, DutiesReaderError};
+use std::collections::HashMap;
+use std::sync::RwLock;
+use types::{Epoch, Slot};
+
+/// The information required for a validator to propose and attest during some epoch.
+///
+/// Generally obtained from a Beacon Node, this information contains the validator's canonical
+/// index (their sequence in the global validator induction process) and the "shuffling" for that
+/// index for some epoch.
+#[derive(Debug, PartialEq, Clone, Copy, Default)]
+pub struct EpochDuties {
+    pub validator_index: u64,
+    pub block_production_slot: Option<Slot>,
+    // Future shard info
+}
+
+impl EpochDuties {
+    /// Returns `true` if the supplied `slot` is a slot in which the validator should produce a
+    /// block.
+    pub fn is_block_production_slot(&self, slot: Slot) -> bool {
+        match self.block_production_slot {
+            Some(s) if s == slot => true,
+            _ => false,
+        }
+    }
+}
+
+pub enum EpochDutiesMapError {
+    Poisoned,
+}
+
+/// Maps an `epoch` to some `EpochDuties` for a single validator.
+pub struct EpochDutiesMap {
+    pub epoch_length: u64,
+    pub map: RwLock<HashMap<Epoch, EpochDuties>>,
+}
+
+impl EpochDutiesMap {
+    pub fn new(epoch_length: u64) -> Self {
+        Self {
+            epoch_length,
+            map: RwLock::new(HashMap::new()),
+        }
+    }
+
+    pub fn get(&self, epoch: Epoch) -> Result<Option<EpochDuties>, EpochDutiesMapError> {
+        let map = self.map.read().map_err(|_| EpochDutiesMapError::Poisoned)?;
+        match map.get(&epoch) {
+            Some(duties) => Ok(Some(*duties)),
+            None => Ok(None),
+        }
+    }
+
+    pub fn insert(
+        &self,
+        epoch: Epoch,
+        epoch_duties: EpochDuties,
+    ) -> Result<Option<EpochDuties>, EpochDutiesMapError> {
+        let mut map = self
+            .map
+            .write()
+            .map_err(|_| EpochDutiesMapError::Poisoned)?;
+        Ok(map.insert(epoch, epoch_duties))
+    }
+}
+
+impl DutiesReader for EpochDutiesMap {
+    fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
+        let epoch = slot.epoch(self.epoch_length);
+
+        let map = self.map.read().map_err(|_| DutiesReaderError::Poisoned)?;
+        let duties = map
+            .get(&epoch)
+            .ok_or_else(|| DutiesReaderError::UnknownEpoch)?;
+        Ok(duties.is_block_production_slot(slot))
+    }
+}
+
+// TODO: add tests.
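+//
+// An illustrative sketch of the kind of test the TODO above calls for
+// (editor's addition, not part of the original patch):
+//
+// #[test]
+// fn block_production_slot_matches() {
+//     let duties = EpochDuties {
+//         validator_index: 0,
+//         block_production_slot: Some(Slot::new(5)),
+//     };
+//     assert!(duties.is_block_production_slot(Slot::new(5)));
+//     assert!(!duties.is_block_production_slot(Slot::new(6)));
+// }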
diff --git a/validator_client/src/duties/grpc.rs b/validator_client/src/duties/grpc.rs
new file mode 100644
index 000000000..94f843b63
--- /dev/null
+++ b/validator_client/src/duties/grpc.rs
@@ -0,0 +1,54 @@
+use super::traits::{BeaconNode, BeaconNodeError};
+use super::EpochDuties;
+use protos::services::{ProposeBlockSlotRequest, PublicKey as IndexRequest};
+use protos::services_grpc::ValidatorServiceClient;
+use ssz::ssz_encode;
+use types::{Epoch, PublicKey, Slot};
+
+impl BeaconNode for ValidatorServiceClient {
+    /// Request the shuffling from the Beacon Node (BN).
+    ///
+    /// As this function takes a `PublicKey`, it will first attempt to resolve the public key into
+    /// a validator index, then call the BN for production/attestation duties.
+    ///
+    /// Note: presently only block production information is returned.
+    fn request_shuffling(
+        &self,
+        epoch: Epoch,
+        public_key: &PublicKey,
+    ) -> Result<Option<EpochDuties>, BeaconNodeError> {
+        // Lookup the validator index for the supplied public key.
+        let validator_index = {
+            let mut req = IndexRequest::new();
+            req.set_public_key(ssz_encode(public_key));
+            let resp = self
+                .validator_index(&req)
+                .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
+            resp.get_index()
+        };
+
+        let mut req = ProposeBlockSlotRequest::new();
+        req.set_validator_index(validator_index);
+        req.set_epoch(epoch.as_u64());
+
+        let reply = self
+            .propose_block_slot(&req)
+            .map_err(|err| BeaconNodeError::RemoteFailure(format!("{:?}", err)))?;
+
+        let block_production_slot = if reply.has_slot() {
+            Some(Slot::new(reply.get_slot()))
+        } else {
+            None
+        };
+
+        Ok(Some(EpochDuties {
+            validator_index,
+            block_production_slot,
+        }))
+    }
+}
diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs
new file mode 100644
index 000000000..febab4755
--- /dev/null
+++ b/validator_client/src/duties/mod.rs
@@ -0,0 +1,161 @@
+mod epoch_duties;
+mod grpc;
+mod service;
+#[cfg(test)]
+mod test_node;
+mod traits;
+
+pub use self::epoch_duties::EpochDutiesMap;
+use self::epoch_duties::{EpochDuties, EpochDutiesMapError};
+pub use self::service::DutiesManagerService;
+use self::traits::{BeaconNode, BeaconNodeError};
+use bls::PublicKey;
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use types::{ChainSpec, Epoch};
+
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub enum PollOutcome {
+    /// The `EpochDuties` were not updated during this poll.
+    NoChange(Epoch),
+    /// The `EpochDuties` for the `epoch` were previously unknown, but obtained in the poll.
+    NewDuties(Epoch, EpochDuties),
+    /// New `EpochDuties` were obtained, different to those which were previously known. This is
+    /// likely to be the result of chain re-organisation.
+    DutiesChanged(Epoch, EpochDuties),
+    /// The Beacon Node was unable to return the duties as the validator is unknown, or the
+    /// shuffling for the epoch is unknown.
+    UnknownValidatorOrEpoch(Epoch),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    SlotClockError,
+    SlotUnknowable,
+    EpochMapPoisoned,
+    BeaconNodeError(BeaconNodeError),
+}
+
+/// A polling state machine which ensures the latest `EpochDuties` are obtained from the Beacon
+/// Node.
+///
+/// There is a single `DutiesManager` per validator instance.
+pub struct DutiesManager<T: SlotClock, U: BeaconNode> {
+    pub duties_map: Arc<EpochDutiesMap>,
+    /// The validator's public key.
+    pub pubkey: PublicKey,
+    pub spec: Arc<ChainSpec>,
+    pub slot_clock: Arc<T>,
+    pub beacon_node: Arc<U>,
+}
+
+impl<T: SlotClock, U: BeaconNode> DutiesManager<T, U> {
+    /// Poll the Beacon Node for `EpochDuties`.
+    ///
+    /// The present `epoch` will be learned from the supplied `SlotClock`. In production this will
+    /// be a wall-clock (e.g., system time, remote server time, etc.).
+    pub fn poll(&self) -> Result<PollOutcome, Error> {
+        let slot = self
+            .slot_clock
+            .present_slot()
+            .map_err(|_| Error::SlotClockError)?
+            .ok_or(Error::SlotUnknowable)?;
+
+        let epoch = slot.epoch(self.spec.epoch_length);
+
+        if let Some(duties) = self.beacon_node.request_shuffling(epoch, &self.pubkey)? {
+            // If these duties were known, check to see if they're updates or identical.
+            let result = if let Some(known_duties) = self.duties_map.get(epoch)?
{ + if known_duties == duties { + Ok(PollOutcome::NoChange(epoch)) + } else { + Ok(PollOutcome::DutiesChanged(epoch, duties)) + } + } else { + Ok(PollOutcome::NewDuties(epoch, duties)) + }; + self.duties_map.insert(epoch, duties)?; + result + } else { + Ok(PollOutcome::UnknownValidatorOrEpoch(epoch)) + } + } +} + +impl From for Error { + fn from(e: BeaconNodeError) -> Error { + Error::BeaconNodeError(e) + } +} + +impl From for Error { + fn from(e: EpochDutiesMapError) -> Error { + match e { + EpochDutiesMapError::Poisoned => Error::EpochMapPoisoned, + } + } +} + +#[cfg(test)] +mod tests { + use super::test_node::TestBeaconNode; + use super::*; + use bls::Keypair; + use slot_clock::TestingSlotClock; + use types::Slot; + + // TODO: implement more thorough testing. + // https://github.com/sigp/lighthouse/issues/160 + // + // These tests should serve as a good example for future tests. + + #[test] + pub fn polling() { + let spec = Arc::new(ChainSpec::foundation()); + let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); + let keypair = Keypair::random(); + let slot_clock = Arc::new(TestingSlotClock::new(0)); + let beacon_node = Arc::new(TestBeaconNode::default()); + + let manager = DutiesManager { + spec: spec.clone(), + pubkey: keypair.pk.clone(), + duties_map: duties_map.clone(), + slot_clock: slot_clock.clone(), + beacon_node: beacon_node.clone(), + }; + + // Configure response from the BeaconNode. + let duties = EpochDuties { + validator_index: 0, + block_production_slot: Some(Slot::new(10)), + }; + beacon_node.set_next_shuffling_result(Ok(Some(duties))); + + // Get the duties for the first time... + assert_eq!( + manager.poll(), + Ok(PollOutcome::NewDuties(Epoch::new(0), duties)) + ); + // Get the same duties again... + assert_eq!(manager.poll(), Ok(PollOutcome::NoChange(Epoch::new(0)))); + + // Return new duties. + let duties = EpochDuties { + validator_index: 0, + block_production_slot: Some(Slot::new(11)), + }; + beacon_node.set_next_shuffling_result(Ok(Some(duties))); + assert_eq!( + manager.poll(), + Ok(PollOutcome::DutiesChanged(Epoch::new(0), duties)) + ); + + // Return no duties. + beacon_node.set_next_shuffling_result(Ok(None)); + assert_eq!( + manager.poll(), + Ok(PollOutcome::UnknownValidatorOrEpoch(Epoch::new(0))) + ); + } +} diff --git a/validator_client/src/duties/service.rs b/validator_client/src/duties/service.rs new file mode 100644 index 000000000..bdb6faefa --- /dev/null +++ b/validator_client/src/duties/service.rs @@ -0,0 +1,40 @@ +use super::traits::BeaconNode; +use super::{DutiesManager, PollOutcome}; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; +use std::time::Duration; + +pub struct DutiesManagerService { + pub manager: DutiesManager, + pub poll_interval_millis: u64, + pub log: Logger, +} + +impl DutiesManagerService { + /// Run a loop which polls the manager each `poll_interval_millis` milliseconds. + /// + /// Logs the results of the polls. 
+    pub fn run(&mut self) {
+        loop {
+            match self.manager.poll() {
+                Err(error) => {
+                    error!(self.log, "Epoch duties poll error"; "error" => format!("{:?}", error))
+                }
+                Ok(PollOutcome::NoChange(epoch)) => {
+                    debug!(self.log, "No change in duties"; "epoch" => epoch)
+                }
+                Ok(PollOutcome::DutiesChanged(epoch, duties)) => {
+                    info!(self.log, "Duties changed (potential re-org)"; "epoch" => epoch, "duties" => format!("{:?}", duties))
+                }
+                Ok(PollOutcome::NewDuties(epoch, duties)) => {
+                    info!(self.log, "New duties obtained"; "epoch" => epoch, "duties" => format!("{:?}", duties))
+                }
+                Ok(PollOutcome::UnknownValidatorOrEpoch(epoch)) => {
+                    error!(self.log, "Epoch or validator unknown"; "epoch" => epoch)
+                }
+            };
+
+            std::thread::sleep(Duration::from_millis(self.poll_interval_millis));
+        }
+    }
+}
diff --git a/validator_client/src/duties/test_node.rs b/validator_client/src/duties/test_node.rs
new file mode 100644
index 000000000..331b78f3b
--- /dev/null
+++ b/validator_client/src/duties/test_node.rs
@@ -0,0 +1,32 @@
+use super::traits::{BeaconNode, BeaconNodeError};
+use super::EpochDuties;
+use bls::PublicKey;
+use std::sync::RwLock;
+use types::Epoch;
+
+type ShufflingResult = Result<Option<EpochDuties>, BeaconNodeError>;
+
+/// A test-only struct used to simulate a Beacon Node.
+#[derive(Default)]
+pub struct TestBeaconNode {
+    pub request_shuffling_input: RwLock<Option<(Epoch, PublicKey)>>,
+    pub request_shuffling_result: RwLock<Option<ShufflingResult>>,
+}
+
+impl TestBeaconNode {
+    /// Set the result to be returned when `request_shuffling` is called.
+    pub fn set_next_shuffling_result(&self, result: ShufflingResult) {
+        *self.request_shuffling_result.write().unwrap() = Some(result);
+    }
+}
+
+impl BeaconNode for TestBeaconNode {
+    /// Returns the value specified by `set_next_shuffling_result`.
+    fn request_shuffling(&self, epoch: Epoch, public_key: &PublicKey) -> ShufflingResult {
+        *self.request_shuffling_input.write().unwrap() = Some((epoch, public_key.clone()));
+        match *self.request_shuffling_result.read().unwrap() {
+            Some(ref r) => r.clone(),
+            None => panic!("TestBeaconNode: produce_result == None"),
+        }
+    }
+}
diff --git a/validator_client/src/duties/traits.rs b/validator_client/src/duties/traits.rs
new file mode 100644
index 000000000..5bf7da1fd
--- /dev/null
+++ b/validator_client/src/duties/traits.rs
@@ -0,0 +1,20 @@
+use super::EpochDuties;
+use bls::PublicKey;
+use types::Epoch;
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum BeaconNodeError {
+    RemoteFailure(String),
+}
+
+/// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
+pub trait BeaconNode: Send + Sync {
+    /// Get the shuffling for the given epoch and public key.
+    ///
+    /// Returns `Ok(None)` if the public key is unknown, or the shuffling for that epoch is
+    /// unknown.
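+    ///
+    /// (Editor's note.) The gRPC-backed implementation lives in
+    /// `duties/grpc.rs` (for `ValidatorServiceClient`), while
+    /// `duties/test_node.rs` provides a test double.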
diff --git a/validator_client/src/duties/test_node.rs b/validator_client/src/duties/test_node.rs
new file mode 100644
index 000000000..331b78f3b
--- /dev/null
+++ b/validator_client/src/duties/test_node.rs
@@ -0,0 +1,32 @@
+use super::traits::{BeaconNode, BeaconNodeError};
+use super::EpochDuties;
+use bls::PublicKey;
+use std::sync::RwLock;
+use types::Epoch;
+
+type ShufflingResult = Result<Option<EpochDuties>, BeaconNodeError>;
+
+/// A test-only struct used to simulate a Beacon Node.
+#[derive(Default)]
+pub struct TestBeaconNode {
+    pub request_shuffling_input: RwLock<Option<(Epoch, PublicKey)>>,
+    pub request_shuffling_result: RwLock<Option<ShufflingResult>>,
+}
+
+impl TestBeaconNode {
+    /// Set the result to be returned when `request_shuffling` is called.
+    pub fn set_next_shuffling_result(&self, result: ShufflingResult) {
+        *self.request_shuffling_result.write().unwrap() = Some(result);
+    }
+}
+
+impl BeaconNode for TestBeaconNode {
+    /// Returns the value previously supplied via `set_next_shuffling_result`.
+    fn request_shuffling(&self, epoch: Epoch, public_key: &PublicKey) -> ShufflingResult {
+        *self.request_shuffling_input.write().unwrap() = Some((epoch, public_key.clone()));
+        match *self.request_shuffling_result.read().unwrap() {
+            Some(ref r) => r.clone(),
+            None => panic!("TestBeaconNode: request_shuffling_result == None"),
+        }
+    }
+}
diff --git a/validator_client/src/duties/traits.rs b/validator_client/src/duties/traits.rs
new file mode 100644
index 000000000..5bf7da1fd
--- /dev/null
+++ b/validator_client/src/duties/traits.rs
@@ -0,0 +1,20 @@
+use super::EpochDuties;
+use bls::PublicKey;
+use types::Epoch;
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum BeaconNodeError {
+    RemoteFailure(String),
+}
+
+/// Defines the methods required to obtain a validator's shuffling from a Beacon Node.
+pub trait BeaconNode: Send + Sync {
+    /// Get the shuffling for the given epoch and public key.
+    ///
+    /// Returns `Ok(None)` if the public key is unknown, or the shuffling for that epoch is
+    /// unknown.
+    fn request_shuffling(
+        &self,
+        epoch: Epoch,
+        public_key: &PublicKey,
+    ) -> Result<Option<EpochDuties>, BeaconNodeError>;
+}
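The trait encodes a three-way contract: `Ok(Some(duties))` when the node knows the validator's shuffling, `Ok(None)` when the validator or epoch is unknown, and `Err(BeaconNodeError::RemoteFailure)` when the node cannot answer at all. A map-backed implementor makes that contract concrete; this sketch uses `u64` stand-ins for `Epoch` and `PublicKey` since the `types` and `bls` crates are not assumed here:

use std::collections::HashMap;

#[derive(Debug, PartialEq, Clone)]
enum BeaconNodeError {
    RemoteFailure(String),
}

#[derive(Debug, PartialEq, Clone, Copy)]
struct EpochDuties {
    validator_index: u64,
    block_production_slot: Option<u64>,
}

// Simplified stand-ins: u64 for both `Epoch` and `PublicKey`.
trait BeaconNode {
    fn request_shuffling(&self, epoch: u64, public_key: &u64)
        -> Result<Option<EpochDuties>, BeaconNodeError>;
}

/// Known (epoch, key) pairs yield duties; everything else yields `Ok(None)`.
/// An unreachable node yields `Err(RemoteFailure)`.
struct MapBeaconNode {
    duties: HashMap<(u64, u64), EpochDuties>,
    online: bool,
}

impl BeaconNode for MapBeaconNode {
    fn request_shuffling(&self, epoch: u64, public_key: &u64)
        -> Result<Option<EpochDuties>, BeaconNodeError> {
        if !self.online {
            return Err(BeaconNodeError::RemoteFailure("connection refused".into()));
        }
        Ok(self.duties.get(&(epoch, *public_key)).copied())
    }
}

fn main() {
    let mut duties = HashMap::new();
    duties.insert((0, 42), EpochDuties { validator_index: 0, block_production_slot: Some(10) });
    let node = MapBeaconNode { duties, online: true };

    assert!(node.request_shuffling(0, &42).unwrap().is_some()); // duties found
    assert!(node.request_shuffling(1, &42).unwrap().is_none()); // epoch unknown

    let offline = MapBeaconNode { duties: HashMap::new(), online: false };
    assert!(offline.request_shuffling(0, &42).is_err()); // remote failure
}

The `Ok(None)` case is what the manager maps to `PollOutcome::UnknownValidatorOrEpoch`, while `Err` surfaces as `Error::BeaconNodeError` via the `From` impl above.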
+ info!(log, "Starting block producer service"; "polls_per_epoch" => spec.slot_duration * 1000 / poll_interval_millis); + + /* + * Start threads. + */ + let mut threads = vec![]; + // TODO: keypairs are randomly generated; they should be loaded from a file or generated. + // https://github.com/sigp/lighthouse/issues/160 + let keypairs = vec![Keypair::random()]; + + for keypair in keypairs { + info!(log, "Starting validator services"; "validator" => keypair.pk.concatenated_hex_id()); + let duties_map = Arc::new(EpochDutiesMap::new(spec.epoch_length)); + + // Spawn a new thread to maintain the validator's `EpochDuties`. + let duties_manager_thread = { + let spec = spec.clone(); + let duties_map = duties_map.clone(); + let slot_clock = slot_clock.clone(); + let log = log.clone(); + let beacon_node = validator_grpc_client.clone(); + let pubkey = keypair.pk.clone(); + thread::spawn(move || { + let manager = DutiesManager { + duties_map, + pubkey, + spec, + slot_clock, + beacon_node, + }; + let mut duties_manager_service = DutiesManagerService { + manager, + poll_interval_millis, + log, + }; + + duties_manager_service.run(); + }) + }; + + // Spawn a new thread to perform block production for the validator. + let producer_thread = { + let spec = spec.clone(); + let signer = Arc::new(LocalSigner::new(keypair.clone())); + let duties_map = duties_map.clone(); + let slot_clock = slot_clock.clone(); + let log = log.clone(); + let client = Arc::new(BeaconBlockGrpcClient::new(beacon_block_grpc_client.clone())); + thread::spawn(move || { + let block_producer = + BlockProducer::new(spec, duties_map, slot_clock, client, signer); + let mut block_producer_service = BlockProducerService { + block_producer, + poll_interval_millis, + log, + }; + + block_producer_service.run(); + }) + }; + + threads.push((duties_manager_thread, producer_thread)); + } + + // Naively wait for all the threads to complete. + for tuple in threads { + let (manager, producer) = tuple; + let _ = producer.join(); + let _ = manager.join(); + } +}