Merge branch 'master' into interop

Paul Hauner 2019-08-29 16:00:22 +10:00
commit 29c8477242
GPG Key ID: 5E2CFF9B75FA63DF
53 changed files with 2513 additions and 1280 deletions

View File

@ -13,6 +13,7 @@ lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
log = "0.4"
operation_pool = { path = "../../eth2/operation_pool" }
reqwest = "0.9"
rayon = "1.0"
serde = "1.0"
serde_derive = "1.0"
serde_yaml = "0.8"

View File

@ -19,8 +19,7 @@ use state_processing::per_block_processing::{
verify_attestation_for_state, VerifySignatures,
};
use state_processing::{
per_block_processing, per_block_processing_without_verifying_block_signature,
per_slot_processing, BlockProcessingError,
per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
};
use std::sync::Arc;
use std::time::Duration;
@ -716,7 +715,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
finalized: finalized_epoch,
})
} else if let Err(e) =
verify_attestation_for_state(state, &attestation, &self.spec, VerifySignatures::True)
verify_attestation_for_state(state, &attestation, VerifySignatures::True, &self.spec)
{
warn!(
self.log,
@ -895,7 +894,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Apply the received block to its parent state (which has been transitioned into this
// slot).
match per_block_processing(&mut state, &block, &self.spec) {
match per_block_processing(
&mut state,
&block,
Some(block_root),
BlockSignatureStrategy::VerifyIndividual,
&self.spec,
) {
Err(BlockProcessingError::BeaconStateError(e)) => {
return Err(Error::BeaconStateError(e))
}
@ -1059,7 +1064,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
},
};
per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::NoVerification,
&self.spec,
)?;
let state_root = state.canonical_root();
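
For reference, a minimal sketch of how a caller chooses between the new signature strategies. The `per_block_processing` signature is taken from this diff; the wrapper function and its `trusted` flag are illustrative only.

use state_processing::{per_block_processing, BlockProcessingError, BlockSignatureStrategy};
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256};

// Hypothetical helper: verify each signature for blocks received from the network,
// skip verification for blocks this node produced itself.
fn apply_block<T: EthSpec>(
    state: &mut BeaconState<T>,
    block: &BeaconBlock<T>,
    block_root: Option<Hash256>,
    trusted: bool,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    let strategy = if trusted {
        BlockSignatureStrategy::NoVerification
    } else {
        BlockSignatureStrategy::VerifyIndividual
    };
    per_block_processing(state, block, block_root, strategy, spec)
}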

View File

@ -1,7 +1,5 @@
use crate::fork_choice::Error as ForkChoiceError;
use state_processing::per_block_processing::errors::{
AttestationValidationError, IndexedAttestationValidationError,
};
use state_processing::per_block_processing::errors::AttestationValidationError;
use state_processing::BlockProcessingError;
use state_processing::SlotProcessingError;
use types::*;
@ -38,7 +36,6 @@ pub enum BeaconChainError {
beacon_block_root: Hash256,
},
AttestationValidationError(AttestationValidationError),
IndexedAttestationValidationError(IndexedAttestationValidationError),
}
easy_from_to!(SlotProcessingError, BeaconChainError);
@ -56,4 +53,3 @@ easy_from_to!(BlockProcessingError, BlockProductionError);
easy_from_to!(BeaconStateError, BlockProductionError);
easy_from_to!(SlotProcessingError, BlockProductionError);
easy_from_to!(AttestationValidationError, BeaconChainError);
easy_from_to!(IndexedAttestationValidationError, BeaconChainError);

View File

@ -1,5 +1,6 @@
use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use lmd_ghost::LmdGhost;
use rayon::prelude::*;
use sloggers::{null::NullLoggerBuilder, Build};
use slot_clock::TestingSlotClock;
use state_processing::per_slot_processing;
@ -225,7 +226,9 @@ where
.expect("should be able to advance state to slot");
}
state.build_all_caches(&self.spec).unwrap();
state
.build_all_caches(&self.spec)
.expect("should build caches");
let proposer_index = match block_strategy {
BlockStrategy::OnCanonicalHead => self
@ -302,7 +305,7 @@ where
AttestationStrategy::SomeValidators(vec) => vec.clone(),
};
let mut vec = vec![];
let mut attestations = vec![];
state
.get_crosslink_committees_at_slot(state.slot)
@ -311,55 +314,70 @@ where
.for_each(|cc| {
let committee_size = cc.committee.len();
for (i, validator_index) in cc.committee.iter().enumerate() {
// Note: searching this array is worst-case `O(n)`. A hashset could be a better
// alternative.
if attesting_validators.contains(validator_index) {
let data = self
.chain
.produce_attestation_data_for_block(
cc.shard,
head_block_root,
head_block_slot,
state,
)
.expect("should produce attestation data");
let mut local_attestations: Vec<Attestation<E>> = cc
.committee
.par_iter()
.enumerate()
.filter_map(|(i, validator_index)| {
// Note: searching this array is worst-case `O(n)`. A hashset could be a better
// alternative.
if attesting_validators.contains(validator_index) {
let data = self
.chain
.produce_attestation_data_for_block(
cc.shard,
head_block_root,
head_block_slot,
state,
)
.expect("should produce attestation data");
let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap();
aggregation_bits.set(i, true).unwrap();
let custody_bits = BitList::with_capacity(committee_size).unwrap();
let mut aggregation_bits = BitList::with_capacity(committee_size)
.expect("should make aggregation bits");
aggregation_bits
.set(i, true)
.expect("should be able to set aggregation bits");
let custody_bits = BitList::with_capacity(committee_size)
.expect("should make custody bits");
let signature = {
let message = AttestationDataAndCustodyBit {
data: data.clone(),
custody_bit: false,
}
.tree_hash_root();
let signature = {
let message = AttestationDataAndCustodyBit {
data: data.clone(),
custody_bit: false,
}
.tree_hash_root();
let domain =
spec.get_domain(data.target.epoch, Domain::Attestation, fork);
let domain =
spec.get_domain(data.target.epoch, Domain::Attestation, fork);
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&Signature::new(
&message,
domain,
self.get_sk(*validator_index),
));
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&Signature::new(
&message,
domain,
self.get_sk(*validator_index),
));
agg_sig
};
agg_sig
};
vec.push(Attestation {
aggregation_bits,
data,
custody_bits,
signature,
})
}
}
let attestation = Attestation {
aggregation_bits,
data,
custody_bits,
signature,
};
Some(attestation)
} else {
None
}
})
.collect();
attestations.append(&mut local_attestations);
});
vec
attestations
}
/// Creates two forks:

View File

@ -6,6 +6,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bls = { path = "../../eth2/utils/bls" }
beacon_chain = { path = "../beacon_chain" }
network = { path = "../network" }
eth2-libp2p = { path = "../eth2-libp2p" }
@ -30,3 +31,4 @@ lazy_static = "1.3.0"
eth2_config = { path = "../../eth2/utils/eth2_config" }
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
slot_clock = { path = "../../eth2/utils/slot_clock" }
hex = "0.3.2"

View File

@ -1,5 +1,10 @@
use crate::ApiError;
use crate::{ApiError, ApiResult};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bls::PublicKey;
use hex;
use hyper::{Body, Request, StatusCode};
use serde::de::value::StringDeserializer;
use serde_json::Deserializer;
use store::{iter::AncestorIter, Store};
use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot};
@ -31,6 +36,23 @@ pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
}
}
/// Parse a PublicKey from a `0x` prefixed hex string
pub fn parse_pubkey(string: &str) -> Result<PublicKey, ApiError> {
const PREFIX: &str = "0x";
if string.starts_with(PREFIX) {
let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX))
.map_err(|e| ApiError::InvalidQueryParams(format!("Invalid hex string: {:?}", e)))?;
let pubkey = PublicKey::from_bytes(pubkey_bytes.as_slice()).map_err(|e| {
ApiError::InvalidQueryParams(format!("Unable to deserialize public key: {:?}.", e))
})?;
return Ok(pubkey);
} else {
return Err(ApiError::InvalidQueryParams(
"Public key must have a '0x' prefix".to_string(),
));
}
}
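
A quick usage sketch for the helper above. The key bytes are placeholders; a real input would be a 0x-prefixed, 96-character hex encoding of a valid BLS public key.

// Illustrative only: 48 bytes of 0xab is syntactically valid hex but almost certainly
// not a valid BLS public key, so both failure paths are shown via `match`.
let hex_pubkey = format!("0x{}", "ab".repeat(48));
match parse_pubkey(&hex_pubkey) {
    Ok(pubkey) => println!("parsed: {:?}", pubkey),
    Err(e) => println!("rejected: {:?}", e), // bad hex, or a key that fails deserialization
}
assert!(parse_pubkey("deadbeef").is_err()); // missing the mandatory "0x" prefix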
/// Returns the root of the `BeaconBlock` in the canonical chain of `beacon_chain` at the given
/// `slot`, if possible.
///
@ -143,6 +165,12 @@ pub fn state_root_at_slot<T: BeaconChainTypes>(
}
}
pub fn implementation_pending_response(_req: Request<Body>) -> ApiResult {
Err(ApiError::NotImplemented(
"API endpoint has not yet been implemented, but is planned to be soon.".to_owned(),
))
}
#[cfg(test)]
mod test {
use super::*;

View File

@ -10,6 +10,7 @@ mod network;
mod node;
mod spec;
mod url_query;
mod validator;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use client_network::Service as NetworkService;
@ -129,14 +130,26 @@ pub fn start_server<T: BeaconChainTypes>(
// Route the request to the correct handler.
let result = match (req.method(), path.as_ref()) {
// Methods for Beacon Node
//TODO: Remove?
//(&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::<T>(req),
(&Method::GET, "/beacon/head") => beacon::get_head::<T>(req),
(&Method::GET, "/beacon/block") => beacon::get_block::<T>(req),
(&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req),
//TODO: Is the below replaced by finalized_checkpoint?
(&Method::GET, "/beacon/chainhead") => {
helpers::implementation_pending_response(req)
}
(&Method::GET, "/beacon/block_root") => beacon::get_block_root::<T>(req),
(&Method::GET, "/beacon/latest_finalized_checkpoint") => {
beacon::get_latest_finalized_checkpoint::<T>(req)
}
(&Method::GET, "/beacon/state") => beacon::get_state::<T>(req),
(&Method::GET, "/beacon/state_root") => beacon::get_state_root::<T>(req),
//TODO: Add aggregate/filtered state lookups here, e.g. /beacon/validators/balances
// Methods for Client
(&Method::GET, "/metrics") => metrics::get_prometheus::<T>(req),
(&Method::GET, "/network/enr") => network::get_enr::<T>(req),
(&Method::GET, "/network/peer_count") => network::get_peer_count::<T>(req),
@ -148,10 +161,32 @@ pub fn start_server<T: BeaconChainTypes>(
}
(&Method::GET, "/node/version") => node::get_version(req),
(&Method::GET, "/node/genesis_time") => node::get_genesis_time::<T>(req),
(&Method::GET, "/node/deposit_contract") => {
helpers::implementation_pending_response(req)
}
(&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req),
(&Method::GET, "/node/fork") => helpers::implementation_pending_response(req),
// Methods for Validator
(&Method::GET, "/validator/duties") => validator::get_validator_duties::<T>(req),
(&Method::GET, "/validator/block") => helpers::implementation_pending_response(req),
(&Method::POST, "/validator/block") => {
helpers::implementation_pending_response(req)
}
(&Method::GET, "/validator/attestation") => {
helpers::implementation_pending_response(req)
}
(&Method::POST, "/validator/attestation") => {
helpers::implementation_pending_response(req)
}
(&Method::GET, "/spec") => spec::get_spec::<T>(req),
(&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::<T>(req),
(&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::<T>(req),
_ => Err(ApiError::MethodNotAllowed(path.clone())),
_ => Err(ApiError::NotFound(
"Request path and/or method not found.".to_owned(),
)),
};
let response = match result {

View File

@ -2,6 +2,8 @@ use crate::ApiError;
use hyper::Request;
/// Provides handy functions for parsing the query parameters of a URL.
#[derive(Clone, Copy)]
pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>);
impl<'a> UrlQuery<'a> {
@ -11,9 +13,7 @@ impl<'a> UrlQuery<'a> {
pub fn from_request<T>(req: &'a Request<T>) -> Result<Self, ApiError> {
let query_str = req.uri().query().ok_or_else(|| {
ApiError::InvalidQueryParams(
"URL query must be valid and contain at least one
key."
.to_string(),
"URL query must be valid and contain at least one key.".to_string(),
)
})?;
@ -60,6 +60,23 @@ impl<'a> UrlQuery<'a> {
)))
}
}
/// Returns a vector of all query-string values whose key equals `key`.
///
/// If no match is found, an empty vector is returned.
pub fn all_of(mut self, key: &str) -> Result<Vec<String>, ApiError> {
let queries: Vec<_> = self
.0
.filter_map(|(k, v)| {
if k.eq(key) {
Some(v.into_owned())
} else {
None
}
})
.collect();
Ok(queries)
}
}
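
A hedged sketch of how `all_of` pairs with `from_request` for repeated query keys, assuming a request `req` inside a handler that can propagate `ApiError` with `?`.

// Assuming a request whose query string is:
//   ?validator_pubkeys=0xaa...&validator_pubkeys=0xbb...
let query = UrlQuery::from_request(&req)?;
let pubkeys: Vec<String> = query.all_of("validator_pubkeys")?;
// pubkeys now holds both "0xaa..." and "0xbb..."; an absent key yields an empty vector.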
#[cfg(test)]

View File

@ -0,0 +1,149 @@
use super::{success_response, ApiResult};
use crate::{helpers::*, ApiError, UrlQuery};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bls::PublicKey;
use hyper::{Body, Request};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use store::Store;
use types::beacon_state::EthSpec;
use types::{BeaconBlock, BeaconState, Epoch, RelativeEpoch, Shard, Slot};
#[derive(Debug, Serialize, Deserialize)]
pub struct ValidatorDuty {
/// The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._
pub validator_pubkey: String,
/// The slot at which the validator must attest.
pub attestation_slot: Option<Slot>,
/// The shard in which the validator must attest.
pub attestation_shard: Option<Shard>,
/// The slot in which a validator must propose a block, or `null` if block production is not required.
pub block_proposal_slot: Option<Slot>,
}
impl ValidatorDuty {
pub fn new() -> ValidatorDuty {
ValidatorDuty {
validator_pubkey: "".to_string(),
attestation_slot: None,
attestation_shard: None,
block_proposal_slot: None,
}
}
}
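
For illustration, a populated duty serialized with `serde_json` would look roughly like the comment below. The field values are placeholders, and the exact numeric formatting depends on the `Slot`/`Shard` serde implementations.

// {
//   "validator_pubkey": "0x98f87bc7...",
//   "attestation_slot": 38,
//   "attestation_shard": 3,
//   "block_proposal_slot": null
// }
let duty = ValidatorDuty::new();
// A default duty serializes with an empty pubkey string and null duty fields.
let json = serde_json::to_string(&duty).expect("a default duty should serialize");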
/// HTTP handler to retrieve the duties for a set of validators during a particular epoch
pub fn get_validator_duties<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
// Get beacon state
let beacon_chain = req
.extensions()
.get::<Arc<BeaconChain<T>>>()
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
let _ = beacon_chain
.ensure_state_caches_are_built()
.map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?;
let head_state = beacon_chain
.speculative_state()
.expect("This is legacy code and should be removed.");
// Parse and check query parameters
let query = UrlQuery::from_request(&req)?;
let current_epoch = head_state.current_epoch();
let epoch = match query.first_of(&["epoch"]) {
Ok((_, v)) => Epoch::new(v.parse::<u64>().map_err(|e| {
ApiError::InvalidQueryParams(format!("Invalid epoch parameter, must be a u64. {:?}", e))
})?),
Err(_) => {
// epoch not supplied, use the current epoch
current_epoch
}
};
let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|e| {
ApiError::InvalidQueryParams(format!(
"Cannot get RelativeEpoch, epoch out of range: {:?}",
e
))
})?;
//TODO: Handle an array of validators, currently only takes one
let mut validators: Vec<PublicKey> = match query.all_of("validator_pubkeys") {
Ok(v) => v
.iter()
.map(|pk| parse_pubkey(pk))
.collect::<Result<Vec<_>, _>>()?,
Err(e) => {
return Err(e);
}
};
let mut duties: Vec<ValidatorDuty> = Vec::new();
// Get a list of all validators for this epoch
let validator_proposers: Vec<usize> = epoch
.slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| {
head_state
.get_beacon_proposer_index(slot, relative_epoch, &beacon_chain.spec)
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to get proposer index for validator: {:?}",
e
))
})
})
.collect::<Result<Vec<usize>, _>>()?;
// Look up duties for each validator
for val_pk in validators {
let mut duty = ValidatorDuty::new();
duty.validator_pubkey = val_pk.as_hex_string();
// Get the validator index
// If it does not exist in the index, just add a null duty and move on.
let val_index: usize = match head_state.get_validator_index(&val_pk) {
Ok(Some(i)) => i,
Ok(None) => {
duties.append(&mut vec![duty]);
continue;
}
Err(e) => {
return Err(ApiError::ServerError(format!(
"Unable to read validator index cache. {:?}",
e
)));
}
};
// Set attestation duties
match head_state.get_attestation_duties(val_index, relative_epoch) {
Ok(Some(d)) => {
duty.attestation_slot = Some(d.slot);
duty.attestation_shard = Some(d.shard);
}
Ok(None) => {}
Err(e) => {
return Err(ApiError::ServerError(format!(
"unable to read cache for attestation duties: {:?}",
e
)))
}
};
// If the validator is to propose a block, identify the slot
if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) {
duty.block_proposal_slot = Some(Slot::new(
relative_epoch
.into_epoch(current_epoch)
.start_slot(T::EthSpec::slots_per_epoch())
.as_u64()
+ slot as u64,
));
}
duties.append(&mut vec![duty]);
}
let body = Body::from(
serde_json::to_string(&duties)
.expect("We should always be able to serialize the duties we created."),
);
Ok(success_response(body))
}
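
A hypothetical request this handler serves; host, port, public keys and epoch are all placeholders.

// GET /validator/duties?validator_pubkeys=0x98f8...&validator_pubkeys=0xa1b2...&epoch=12
let url = format!(
    "http://{}:{}/validator/duties?validator_pubkeys={}&epoch={}",
    "127.0.0.1", 5052, "0x98f8...", 12
);
// Omitting `epoch` makes the handler fall back to the head state's current epoch.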

View File

@ -178,21 +178,6 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
resp.set_success(false);
resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec());
}
Err(BeaconChainError::IndexedAttestationValidationError(e)) => {
// Indexed attestation was invalid
warn!(
self.log,
"PublishAttestation";
"type" => "invalid_attestation",
"error" => format!("{:?}", e),
);
resp.set_success(false);
resp.set_msg(
format!("InvalidIndexedAttestation: {:?}", e)
.as_bytes()
.to_vec(),
);
}
Err(e) => {
// Some other error
warn!(

View File

@ -22,7 +22,8 @@ steps:
1. Install build dependencies (Arch packages are listed here, your
distribution will likely be similar):
- `clang`: required by RocksDB.
- `protobuf`: required for protobuf serialization (gRPC).
- `protobuf`: required for protobuf serialization (gRPC)
- `libssl-dev`: also required for gRPC
- `cmake`: required for building protobuf
- `git-lfs`: The Git extension for [Large File
Support](https://git-lfs.github.com/) (required for Ethereum Foundation

View File

@ -51,7 +51,6 @@ struct ForkedHarness {
impl ForkedHarness {
/// A new standard instance with constant parameters.
pub fn new() -> Self {
// let (harness, honest_roots, faulty_roots) = get_harness_containing_two_forks();
let harness = BeaconChainHarness::new(VALIDATOR_COUNT);
// Move past the zero slot.

View File

@ -16,9 +16,9 @@ use state_processing::per_block_processing::errors::{
};
use state_processing::per_block_processing::{
get_slashable_indices_modular, verify_attestation_for_block_inclusion,
verify_attester_slashing, verify_exit, verify_exit_time_independent_only,
verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only,
VerifySignatures,
verify_attestation_for_state, verify_attester_slashing, verify_exit,
verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer,
verify_transfer_time_independent_only, VerifySignatures,
};
use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
use std::marker::PhantomData;
@ -134,8 +134,8 @@ impl<T: EthSpec> OperationPool<T> {
verify_attestation_for_block_inclusion(
state,
attestation,
spec,
VerifySignatures::True,
spec,
)
.is_ok()
})
@ -220,7 +220,7 @@ impl<T: EthSpec> OperationPool<T> {
) -> Result<(), ProposerSlashingValidationError> {
// TODO: should maybe insert anyway if the proposer is unknown in the validator index,
// because they could *become* known later
verify_proposer_slashing(&slashing, state, spec)?;
verify_proposer_slashing(&slashing, state, VerifySignatures::True, spec)?;
self.proposer_slashings
.write()
.insert(slashing.proposer_index, slashing);
@ -248,7 +248,7 @@ impl<T: EthSpec> OperationPool<T> {
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), AttesterSlashingValidationError> {
verify_attester_slashing(state, &slashing, true, spec)?;
verify_attester_slashing(state, &slashing, true, VerifySignatures::True, spec)?;
let id = Self::attester_slashing_id(&slashing, state, spec);
self.attester_slashings.write().insert(id, slashing);
Ok(())
@ -346,7 +346,7 @@ impl<T: EthSpec> OperationPool<T> {
state: &BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), ExitValidationError> {
verify_exit_time_independent_only(state, &exit, spec)?;
verify_exit_time_independent_only(state, &exit, VerifySignatures::True, spec)?;
self.voluntary_exits
.write()
.insert(exit.validator_index, exit);
@ -361,7 +361,7 @@ impl<T: EthSpec> OperationPool<T> {
) -> Vec<VoluntaryExit> {
filter_limit_operations(
self.voluntary_exits.read().values(),
|exit| verify_exit(state, exit, spec).is_ok(),
|exit| verify_exit(state, exit, VerifySignatures::False, spec).is_ok(),
T::MaxVoluntaryExits::to_usize(),
)
}
@ -385,7 +385,7 @@ impl<T: EthSpec> OperationPool<T> {
// The signature of the transfer isn't hashed, but because we check
// it before we insert into the HashSet, we can't end up with duplicate
// transactions.
verify_transfer_time_independent_only(state, &transfer, spec)?;
verify_transfer_time_independent_only(state, &transfer, VerifySignatures::True, spec)?;
self.transfers.write().insert(transfer);
Ok(())
}
@ -397,7 +397,9 @@ impl<T: EthSpec> OperationPool<T> {
self.transfers
.read()
.iter()
.filter(|transfer| verify_transfer(state, transfer, spec).is_ok())
.filter(|transfer| {
verify_transfer(state, transfer, VerifySignatures::False, spec).is_ok()
})
.sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee))
.take(T::MaxTransfers::to_usize())
.cloned()

eth2/state_processing/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
flame.sh
*.svg

View File

@ -4,11 +4,21 @@ version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[[bench]]
name = "benches"
harness = false
[dev-dependencies]
criterion = "0.2"
env_logger = "0.6.0"
serde = "1.0"
serde_derive = "1.0"
lazy_static = "0.1"
serde_yaml = "0.8"
beacon_chain = { path = "../../beacon_node/beacon_chain" }
store = { path = "../../beacon_node/store" }
lmd_ghost = { path = "../lmd_ghost" }
[dependencies]
bls = { path = "../utils/bls" }
@ -16,6 +26,7 @@ integer-sqrt = "0.1"
itertools = "0.8"
eth2_ssz_types = { path = "../utils/ssz_types" }
merkle_proof = { path = "../utils/merkle_proof" }
log = "0.4"
tree_hash = "0.1"
tree_hash_derive = "0.2"
types = { path = "../types" }

View File

@ -0,0 +1,399 @@
extern crate env_logger;
use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
use state_processing::{test_utils::BlockBuilder, BlockSignatureStrategy, VerifySignatures};
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, Slot};
pub const VALIDATORS_LOW: usize = 32_768;
pub const VALIDATORS_HIGH: usize = 300_032;
fn all_benches(c: &mut Criterion) {
env_logger::init();
average_bench::<MinimalEthSpec>(c, "minimal", VALIDATORS_LOW);
average_bench::<MainnetEthSpec>(c, "mainnet", VALIDATORS_LOW);
average_bench::<MainnetEthSpec>(c, "mainnet", VALIDATORS_HIGH);
worst_bench::<MinimalEthSpec>(c, "minimal", VALIDATORS_LOW);
worst_bench::<MainnetEthSpec>(c, "mainnet", VALIDATORS_LOW);
worst_bench::<MainnetEthSpec>(c, "mainnet", VALIDATORS_HIGH);
}
/// Run a bench with an average complexity block.
fn average_bench<T: EthSpec>(c: &mut Criterion, spec_desc: &str, validator_count: usize) {
let spec = &T::default_spec();
let (block, state) = get_average_block(validator_count, spec);
bench_block::<T>(c, block, state, spec, spec_desc, "average_complexity_block");
}
/// Run a bench with a highly complex block.
fn worst_bench<T: EthSpec>(c: &mut Criterion, spec_desc: &str, validator_count: usize) {
let mut spec = &mut T::default_spec();
// Allows the exits to be processed successfully.
spec.persistent_committee_period = 0;
let (block, state) = get_worst_block(validator_count, spec);
bench_block::<T>(c, block, state, spec, spec_desc, "high_complexity_block");
}
/// Return a block and state where the block has "average" complexity. I.e., the number of
/// operations we'd generally expect to see.
fn get_average_block<T: EthSpec>(
validator_count: usize,
spec: &ChainSpec,
) -> (BeaconBlock<T>, BeaconState<T>) {
let mut builder: BlockBuilder<T> = BlockBuilder::new(validator_count, &spec);
// builder.num_attestations = T::MaxAttestations::to_usize();
builder.num_attestations = 16;
builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2));
builder.build_caches(&spec);
builder.build(&spec)
}
/// Return a block and state where the block has the "worst" complexity. The block is not
/// _guaranteed_ to be the worst possible complexity; it just has the maximum possible operations.
fn get_worst_block<T: EthSpec>(
validator_count: usize,
spec: &ChainSpec,
) -> (BeaconBlock<T>, BeaconState<T>) {
let mut builder: BlockBuilder<T> = BlockBuilder::new(validator_count, &spec);
builder.maximize_block_operations();
// FIXME: enable deposits once we can generate them with valid proofs.
builder.num_deposits = 0;
builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2));
builder.build_caches(&spec);
builder.build(&spec)
}
fn bench_block<T: EthSpec>(
c: &mut Criterion,
block: BeaconBlock<T>,
state: BeaconState<T>,
spec: &ChainSpec,
spec_desc: &str,
block_desc: &str,
) {
let validator_count = state.validators.len();
let title = &format!(
"{}/{}_validators/{}",
spec_desc, validator_count, block_desc
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new(
"per_block_processing/individual_signature_verification",
move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::<T>(
state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
)
.expect("block processing should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
},
)
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new(
"per_block_processing/bulk_signature_verification",
move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::<T>(
state,
&block,
None,
BlockSignatureStrategy::VerifyBulk,
&spec,
)
.expect("block processing should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
},
)
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("per_block_processing/no_signature_verification", move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::<T>(
state,
&block,
None,
BlockSignatureStrategy::NoVerification,
&spec,
)
.expect("block processing should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("process_block_header", move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::process_block_header::<T>(
state,
&block,
None,
VerifySignatures::True,
&spec,
)
.expect("process_block_header should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("verify_block_signature", move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::verify_block_signature::<T>(
state, &block, None, &spec,
)
.expect("verify_block_signature should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("process_attestations", move |b| {
b.iter_batched_ref(
|| (local_spec.clone(), local_state.clone(), local_block.clone()),
|(spec, ref mut state, block)| {
black_box(
state_processing::per_block_processing::process_attestations::<T>(
state,
&block.body.attestations,
VerifySignatures::True,
&spec,
)
.expect("attestation processing should succeed"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("verify_attestation", move |b| {
b.iter_batched_ref(
|| {
let attestation = &local_block.body.attestations[0];
(local_spec.clone(), local_state.clone(), attestation.clone())
},
|(spec, ref mut state, attestation)| {
black_box(
state_processing::per_block_processing::verify_attestation_for_block_inclusion(
state,
&attestation,
VerifySignatures::True,
spec,
)
.expect("should verify attestation"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
c.bench(
&title,
Benchmark::new("get_indexed_attestation", move |b| {
b.iter_batched_ref(
|| {
let attestation = &local_block.body.attestations[0];
(local_state.clone(), attestation.clone())
},
|(ref mut state, attestation)| {
black_box(
state_processing::common::get_indexed_attestation(state, &attestation)
.expect("should get indexed attestation"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("is_valid_indexed_attestation_with_signature", move |b| {
b.iter_batched_ref(
|| {
let attestation = &local_block.body.attestations[0];
let indexed_attestation = state_processing::common::get_indexed_attestation(
&local_state,
&attestation,
)
.expect("should get indexed attestation");
(
local_spec.clone(),
local_state.clone(),
indexed_attestation.clone(),
)
},
|(spec, ref mut state, indexed_attestation)| {
black_box(
state_processing::per_block_processing::is_valid_indexed_attestation(
state,
&indexed_attestation,
VerifySignatures::True,
spec,
)
.expect("should run is_valid_indexed_attestation"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
let local_spec = spec.clone();
c.bench(
&title,
Benchmark::new("is_valid_indexed_attestation_without_signature", move |b| {
b.iter_batched_ref(
|| {
let attestation = &local_block.body.attestations[0];
let indexed_attestation = state_processing::common::get_indexed_attestation(
&local_state,
&attestation,
)
.expect("should get indexed attestation");
(
local_spec.clone(),
local_state.clone(),
indexed_attestation.clone(),
)
},
|(spec, ref mut state, indexed_attestation)| {
black_box(
state_processing::per_block_processing::is_valid_indexed_attestation(
state,
&indexed_attestation,
VerifySignatures::False,
spec,
)
.expect("should run is_valid_indexed_attestation_without_signature"),
)
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let local_block = block.clone();
let local_state = state.clone();
c.bench(
&title,
Benchmark::new("get_attesting_indices", move |b| {
b.iter_batched_ref(
|| {
let attestation = &local_block.body.attestations[0];
(local_state.clone(), attestation.clone())
},
|(ref mut state, attestation)| {
black_box(state_processing::common::get_attesting_indices(
state,
&attestation.data,
&attestation.aggregation_bits,
))
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
}
criterion_group!(benches, all_benches,);
criterion_main!(benches);

View File

@ -1,16 +1,16 @@
use super::get_attesting_indices;
use crate::per_block_processing::errors::{
AttestationInvalid as Invalid, AttestationValidationError as Error,
};
use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError};
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
/// Convert `attestation` to (almost) indexed-verifiable form.
///
/// Spec v0.8.0
pub fn get_indexed_attestation<T: EthSpec>(
state: &BeaconState<T>,
attestation: &Attestation<T>,
) -> Result<IndexedAttestation<T>, Error> {
) -> Result<IndexedAttestation<T>> {
let attesting_indices =
get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?;

View File

@ -6,11 +6,11 @@ pub mod genesis;
pub mod per_block_processing;
pub mod per_epoch_processing;
pub mod per_slot_processing;
pub mod test_utils;
pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state};
pub use per_block_processing::{
errors::{BlockInvalid, BlockProcessingError},
per_block_processing, per_block_processing_without_verifying_block_signature,
errors::BlockProcessingError, per_block_processing, BlockSignatureStrategy, VerifySignatures,
};
pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing};
pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError};

View File

@ -1,14 +1,16 @@
macro_rules! verify {
($condition: expr, $result: expr) => {
if !$condition {
return Err(Error::Invalid($result));
return Err(crate::per_block_processing::errors::BlockOperationError::invalid($result));
}
};
}
macro_rules! invalid {
($result: expr) => {
return Err(Error::Invalid($result));
macro_rules! block_verify {
($condition: expr, $result: expr) => {
if !$condition {
return Err($result);
}
};
}
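
In short, based on the expansions above: `verify!` wraps the supplied reason in a typed `BlockOperationError`, while `block_verify!` expects the caller to hand over the fully-formed error.

// verify!(block.slot == state.slot, HeaderInvalid::StateSlotMismatch)
//   => on failure, returns Err(BlockOperationError::invalid(HeaderInvalid::StateSlotMismatch))
//
// block_verify!(sets_are_valid, BlockProcessingError::BulkSignatureVerificationFailed)
//   => on failure, returns Err(BlockProcessingError::BulkSignatureVerificationFailed)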

View File

@ -1,19 +1,19 @@
use crate::common::{initiate_validator_exit, slash_validator};
use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex};
use rayon::prelude::*;
use signature_sets::{block_proposal_signature_set, randao_signature_set};
use std::collections::HashSet;
use std::convert::TryInto;
use std::iter::FromIterator;
use tree_hash::{SignedRoot, TreeHash};
use tree_hash::SignedRoot;
use types::*;
pub use self::verify_attester_slashing::{
get_slashable_indices, get_slashable_indices_modular, verify_attester_slashing,
};
pub use self::verify_proposer_slashing::verify_proposer_slashing;
pub use is_valid_indexed_attestation::{
is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature,
};
pub use block_signature_verifier::BlockSignatureVerifier;
pub use is_valid_indexed_attestation::is_valid_indexed_attestation;
pub use verify_attestation::{
verify_attestation_for_block_inclusion, verify_attestation_for_state,
};
@ -26,8 +26,10 @@ pub use verify_transfer::{
};
pub mod block_processing_builder;
mod block_signature_verifier;
pub mod errors;
mod is_valid_indexed_attestation;
mod signature_sets;
pub mod tests;
mod verify_attestation;
mod verify_attester_slashing;
@ -36,39 +38,30 @@ mod verify_exit;
mod verify_proposer_slashing;
mod verify_transfer;
#[derive(PartialEq)]
/// The strategy to be used when validating the block's signatures.
#[derive(PartialEq, Clone, Copy)]
pub enum BlockSignatureStrategy {
/// Do not validate any signature. Use with caution.
NoVerification,
/// Validate each signature individually, as its object is being processed.
VerifyIndividual,
/// Verify all signatures in bulk at the beginning of block processing.
VerifyBulk,
}
/// The strategy to be used when validating the block's signatures.
#[derive(PartialEq, Clone, Copy)]
pub enum VerifySignatures {
/// Validate all signatures encountered.
True,
/// Do not validate any signature. Use with caution.
False,
}
/// Updates the state for a new block, whilst validating that the block is valid.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.8.0
pub fn per_block_processing<T: EthSpec>(
state: &mut BeaconState<T>,
block: &BeaconBlock<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(state, block, true, spec)
}
/// Updates the state for a new block, whilst validating that the block is valid, without actually
/// checking the block proposer signature.
///
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// Spec v0.8.0
pub fn per_block_processing_without_verifying_block_signature<T: EthSpec>(
state: &mut BeaconState<T>,
block: &BeaconBlock<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
per_block_processing_signature_optional(state, block, false, spec)
impl VerifySignatures {
pub fn is_true(&self) -> bool {
*self == VerifySignatures::True
}
}
/// Updates the state for a new block, whilst validating that the block is valid, optionally
@ -77,27 +70,65 @@ pub fn per_block_processing_without_verifying_block_signature<T: EthSpec>(
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute.
///
/// If `block_signed_root` is `Some`, this root is used for verification of the proposer's
/// signature. If it is `None` the signed root is calculated here. This parameter only exists to
/// avoid re-calculating the root when it is already known.
///
/// Spec v0.8.0
fn per_block_processing_signature_optional<T: EthSpec>(
pub fn per_block_processing<T: EthSpec>(
mut state: &mut BeaconState<T>,
block: &BeaconBlock<T>,
should_verify_block_signature: bool,
block_signed_root: Option<Hash256>,
block_signature_strategy: BlockSignatureStrategy,
spec: &ChainSpec,
) -> Result<(), Error> {
process_block_header(state, block, spec, should_verify_block_signature)?;
) -> Result<(), BlockProcessingError> {
let verify_signatures = match block_signature_strategy {
BlockSignatureStrategy::VerifyBulk => {
// Verify all signatures in the block at once.
block_verify!(
BlockSignatureVerifier::verify_entire_block(state, block, spec).is_ok(),
BlockProcessingError::BulkSignatureVerificationFailed
);
VerifySignatures::False
}
BlockSignatureStrategy::VerifyIndividual => VerifySignatures::True,
BlockSignatureStrategy::NoVerification => VerifySignatures::False,
};
process_block_header(state, block, block_signed_root, verify_signatures, spec)?;
// Ensure the current and previous epoch caches are built.
state.build_committee_cache(RelativeEpoch::Previous, spec)?;
state.build_committee_cache(RelativeEpoch::Current, spec)?;
process_randao(&mut state, &block, &spec)?;
process_randao(&mut state, &block, verify_signatures, &spec)?;
process_eth1_data(&mut state, &block.body.eth1_data)?;
process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?;
process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?;
process_attestations(&mut state, &block.body.attestations, spec)?;
process_proposer_slashings(
&mut state,
&block.body.proposer_slashings,
verify_signatures,
spec,
)?;
process_attester_slashings(
&mut state,
&block.body.attester_slashings,
verify_signatures,
spec,
)?;
process_attestations(
&mut state,
&block.body.attestations,
verify_signatures,
spec,
)?;
process_deposits(&mut state, &block.body.deposits, spec)?;
process_exits(&mut state, &block.body.voluntary_exits, spec)?;
process_transfers(&mut state, &block.body.transfers, spec)?;
process_exits(
&mut state,
&block.body.voluntary_exits,
verify_signatures,
spec,
)?;
process_transfers(&mut state, &block.body.transfers, verify_signatures, spec)?;
Ok(())
}
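
A minimal sketch of the root-caching case described above, assuming `state`, `block` and `spec` are in scope and the caller can propagate `BlockProcessingError` with `?`.

use tree_hash::SignedRoot;

// The caller computed the signed root while receiving the block, so pass it in
// rather than letting `per_block_processing` re-hash the block header.
let block_signed_root = Some(Hash256::from_slice(&block.signed_root()));
per_block_processing(
    &mut state,
    &block,
    block_signed_root,
    BlockSignatureStrategy::VerifyIndividual,
    &spec,
)?;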
@ -108,16 +139,17 @@ fn per_block_processing_signature_optional<T: EthSpec>(
pub fn process_block_header<T: EthSpec>(
state: &mut BeaconState<T>,
block: &BeaconBlock<T>,
block_signed_root: Option<Hash256>,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
should_verify_block_signature: bool,
) -> Result<(), Error> {
verify!(block.slot == state.slot, Invalid::StateSlotMismatch);
) -> Result<(), BlockOperationError<HeaderInvalid>> {
verify!(block.slot == state.slot, HeaderInvalid::StateSlotMismatch);
let expected_previous_block_root =
Hash256::from_slice(&state.latest_block_header.signed_root());
verify!(
block.parent_root == expected_previous_block_root,
Invalid::ParentBlockRootMismatch {
HeaderInvalid::ParentBlockRootMismatch {
state: expected_previous_block_root,
block: block.parent_root,
}
@ -128,10 +160,13 @@ pub fn process_block_header<T: EthSpec>(
// Verify proposer is not slashed
let proposer_idx = state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?;
let proposer = &state.validators[proposer_idx];
verify!(!proposer.slashed, Invalid::ProposerSlashed(proposer_idx));
verify!(
!proposer.slashed,
HeaderInvalid::ProposerSlashed(proposer_idx)
);
if should_verify_block_signature {
verify_block_signature(&state, &block, &spec)?;
if verify_signatures.is_true() {
verify_block_signature(&state, &block, block_signed_root, &spec)?;
}
Ok(())
@ -143,22 +178,12 @@ pub fn process_block_header<T: EthSpec>(
pub fn verify_block_signature<T: EthSpec>(
state: &BeaconState<T>,
block: &BeaconBlock<T>,
block_signed_root: Option<Hash256>,
spec: &ChainSpec,
) -> Result<(), Error> {
let block_proposer = &state.validators
[state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
let domain = spec.get_domain(
block.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer,
&state.fork,
);
) -> Result<(), BlockOperationError<HeaderInvalid>> {
verify!(
block
.signature
.verify(&block.signed_root()[..], domain, &block_proposer.pubkey),
Invalid::BadSignature
block_proposal_signature_set(state, block, block_signed_root, spec)?.is_valid(),
HeaderInvalid::ProposalSignatureInvalid
);
Ok(())
@ -171,24 +196,16 @@ pub fn verify_block_signature<T: EthSpec>(
pub fn process_randao<T: EthSpec>(
state: &mut BeaconState<T>,
block: &BeaconBlock<T>,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
let block_proposer = &state.validators
[state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
// Verify RANDAO reveal.
verify!(
block.body.randao_reveal.verify(
&state.current_epoch().tree_hash_root()[..],
spec.get_domain(
block.slot.epoch(T::slots_per_epoch()),
Domain::Randao,
&state.fork
),
&block_proposer.pubkey
),
Invalid::BadRandaoSignature
);
) -> Result<(), BlockProcessingError> {
if verify_signatures.is_true() {
// Verify RANDAO reveal signature.
block_verify!(
randao_signature_set(state, block, spec)?.is_valid(),
BlockProcessingError::RandaoSignatureInvalid
);
}
// Update the current epoch RANDAO mix.
state.update_randao_mix(state.current_epoch(), &block.body.randao_reveal)?;
@ -227,14 +244,15 @@ pub fn process_eth1_data<T: EthSpec>(
pub fn process_proposer_slashings<T: EthSpec>(
state: &mut BeaconState<T>,
proposer_slashings: &[ProposerSlashing],
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
// Verify proposer slashings in parallel.
proposer_slashings
.par_iter()
.enumerate()
.try_for_each(|(i, proposer_slashing)| {
verify_proposer_slashing(proposer_slashing, &state, spec)
verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec)
.map_err(|e| e.into_with_index(i))
})?;
@ -255,8 +273,9 @@ pub fn process_proposer_slashings<T: EthSpec>(
pub fn process_attester_slashings<T: EthSpec>(
state: &mut BeaconState<T>,
attester_slashings: &[AttesterSlashing<T>],
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
// Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not
// the `AttesterSlashing`s themselves).
let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2);
@ -270,7 +289,7 @@ pub fn process_attester_slashings<T: EthSpec>(
.par_iter()
.enumerate()
.try_for_each(|(i, indexed_attestation)| {
is_valid_indexed_attestation(&state, indexed_attestation, spec)
is_valid_indexed_attestation(&state, indexed_attestation, verify_signatures, spec)
.map_err(|e| e.into_with_index(i))
})?;
let all_indexed_attestations_have_been_checked = true;
@ -283,6 +302,7 @@ pub fn process_attester_slashings<T: EthSpec>(
&state,
&attester_slashing,
should_verify_indexed_attestations,
verify_signatures,
spec,
)
.map_err(|e| e.into_with_index(i))?;
@ -307,8 +327,9 @@ pub fn process_attester_slashings<T: EthSpec>(
pub fn process_attestations<T: EthSpec>(
state: &mut BeaconState<T>,
attestations: &[Attestation<T>],
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
// Ensure the previous epoch cache exists.
state.build_committee_cache(RelativeEpoch::Previous, spec)?;
@ -317,7 +338,7 @@ pub fn process_attestations<T: EthSpec>(
.par_iter()
.enumerate()
.try_for_each(|(i, attestation)| {
verify_attestation_for_block_inclusion(state, attestation, spec, VerifySignatures::True)
verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec)
.map_err(|e| e.into_with_index(i))
})?;
@ -355,14 +376,17 @@ pub fn process_deposits<T: EthSpec>(
state: &mut BeaconState<T>,
deposits: &[Deposit],
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(
deposits.len() as u64
== std::cmp::min(
T::MaxDeposits::to_u64(),
state.eth1_data.deposit_count - state.eth1_deposit_index
),
Invalid::DepositCountInvalid
) -> Result<(), BlockProcessingError> {
let expected_deposit_len = std::cmp::min(
T::MaxDeposits::to_u64(),
state.eth1_data.deposit_count - state.eth1_deposit_index,
);
block_verify!(
deposits.len() as u64 == expected_deposit_len,
BlockProcessingError::DepositCountInvalid {
expected: expected_deposit_len as usize,
found: deposits.len(),
}
);
// Verify merkle proofs in parallel.
@ -390,7 +414,7 @@ pub fn process_deposit<T: EthSpec>(
deposit: &Deposit,
spec: &ChainSpec,
verify_merkle_proof: bool,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
let deposit_index = state.eth1_deposit_index as usize;
if verify_merkle_proof {
verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index, spec)
@ -420,7 +444,7 @@ pub fn process_deposit<T: EthSpec>(
} else {
// The signature should be checked for new validators. Return early for a bad
// signature.
if verify_deposit_signature(state, deposit, spec, &pubkey).is_err() {
if verify_deposit_signature(state, deposit, spec).is_err() {
return Ok(());
}
@ -454,14 +478,15 @@ pub fn process_deposit<T: EthSpec>(
pub fn process_exits<T: EthSpec>(
state: &mut BeaconState<T>,
voluntary_exits: &[VoluntaryExit],
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
// Verify exits in parallel.
voluntary_exits
.par_iter()
.enumerate()
.try_for_each(|(i, exit)| {
verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))
verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))
})?;
// Update the state in series.
@ -481,19 +506,24 @@ pub fn process_exits<T: EthSpec>(
pub fn process_transfers<T: EthSpec>(
state: &mut BeaconState<T>,
transfers: &[Transfer],
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<(), BlockProcessingError> {
let expected_transfers = HashSet::<_>::from_iter(transfers).len();
// Verify that there are no duplicate transfers
verify!(
transfers.len() == HashSet::<_>::from_iter(transfers).len(),
Invalid::DuplicateTransfers
block_verify!(
transfers.len() == expected_transfers,
BlockProcessingError::DuplicateTransfers {
duplicates: transfers.len().saturating_sub(expected_transfers)
}
);
transfers
.par_iter()
.enumerate()
.try_for_each(|(i, transfer)| {
verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))
verify_transfer(&state, transfer, verify_signatures, spec)
.map_err(|e| e.into_with_index(i))
})?;
for (i, transfer) in transfers.iter().enumerate() {

View File

@ -0,0 +1,227 @@
use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetResult, *};
use crate::common::get_indexed_attestation;
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
use bls::{verify_signature_sets, SignatureSet};
use rayon::prelude::*;
use types::{
BeaconBlock, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, IndexedAttestation,
};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, PartialEq)]
pub enum Error {
/// All public keys were found but signature verification failed. The block is invalid.
SignatureInvalid,
/// An attestation in the block was invalid. The block is invalid.
AttestationValidationError(BlockOperationError<AttestationInvalid>),
/// There was an error attempting to read from a `BeaconState`. Block
/// validity was not determined.
BeaconStateError(BeaconStateError),
/// Failed to load a signature set. The block may be invalid or we failed to process it.
SignatureSetError(SignatureSetError),
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
impl From<SignatureSetError> for Error {
fn from(e: SignatureSetError) -> Error {
Error::SignatureSetError(e)
}
}
impl From<BlockOperationError<AttestationInvalid>> for Error {
fn from(e: BlockOperationError<AttestationInvalid>) -> Error {
Error::AttestationValidationError(e)
}
}
/// Reads the BLS signatures and keys from a `BeaconBlock`, storing them as a `Vec<SignatureSet>`.
///
/// This allows for optimizations related to batch BLS operations (see the
/// `Self::verify_entire_block(..)` function).
pub struct BlockSignatureVerifier<'a, T: EthSpec> {
block: &'a BeaconBlock<T>,
state: &'a BeaconState<T>,
spec: &'a ChainSpec,
sets: Vec<SignatureSet<'a>>,
}
impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
/// Create a new verifier without any included signatures. See the `include...` functions to
/// add signatures, and the `verify` function to check them.
pub fn new(state: &'a BeaconState<T>, block: &'a BeaconBlock<T>, spec: &'a ChainSpec) -> Self {
Self {
block,
state,
spec,
sets: vec![],
}
}
/// Verify all* the signatures in the given `BeaconBlock`, returning `Ok(())` if the signatures
/// are valid.
///
/// * : _Does not verify any signatures in `block.body.deposits`. A block is still valid if it
/// contains invalid signatures on deposits._
///
/// See `Self::verify` for more detail.
pub fn verify_entire_block(
state: &'a BeaconState<T>,
block: &'a BeaconBlock<T>,
spec: &'a ChainSpec,
) -> Result<()> {
let mut verifier = Self::new(state, block, spec);
verifier.include_block_proposal(None)?;
verifier.include_randao_reveal()?;
verifier.include_proposer_slashings()?;
verifier.include_attester_slashings()?;
verifier.include_attestations()?;
/*
* Deposits are not included because they can legally have invalid signatures.
*/
verifier.include_exits()?;
verifier.include_transfers()?;
verifier.verify()
}
/// Verify all* the signatures that have been included in `self`, returning `Ok(())` if the
/// signatures are all valid.
///
/// ## Notes
///
/// Signature validation will take place in accordance to the [Faster verification of multiple
/// BLS signatures](https://ethresear.ch/t/fast-verification-of-multiple-bls-signatures/5407)
/// optimization proposed by Vitalik Buterin.
///
/// It is not possible to know exactly _which_ signature is invalid here, just that
/// _at least one_ was invalid.
///
/// Uses `rayon` to do a map-reduce of Vitalik's method across multiple cores.
pub fn verify(self) -> Result<()> {
let num_sets = self.sets.len();
let num_chunks = std::cmp::max(1, num_sets / rayon::current_num_threads());
let result: bool = self
.sets
.into_par_iter()
.chunks(num_chunks)
.map(|chunk| verify_signature_sets(chunk.into_iter()))
.reduce(|| true, |current, this| current && this);
if result {
Ok(())
} else {
Err(Error::SignatureInvalid)
}
}
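
For background, and not part of this diff: the linked ethresear.ch optimization replaces the n individual checks e(g, sigma_i) = e(pk_i, H(m_i)) with a single randomised pairing-product check, roughly

    e(g, sum_i r_i * sigma_i) = prod_i e(r_i * pk_i, H(m_i))

for random non-zero scalars r_i, which fails with overwhelming probability if any single set is invalid. The `chunks`/`map`/`reduce` above only distributes the signature sets across rayon worker threads and ANDs the per-chunk results.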
/// Includes the block signature for `self.block` for verification.
fn include_block_proposal(&mut self, block_root: Option<Hash256>) -> Result<()> {
let set = block_proposal_signature_set(self.state, self.block, block_root, self.spec)?;
self.sets.push(set);
Ok(())
}
/// Includes the randao signature for `self.block` for verification.
fn include_randao_reveal(&mut self) -> Result<()> {
let set = randao_signature_set(self.state, self.block, self.spec)?;
self.sets.push(set);
Ok(())
}
/// Includes all signatures in `self.block.body.proposer_slashings` for verification.
fn include_proposer_slashings(&mut self) -> Result<()> {
let mut sets: Vec<SignatureSet> = self
.block
.body
.proposer_slashings
.iter()
.map(|proposer_slashing| {
let (set_1, set_2) =
proposer_slashing_signature_set(self.state, proposer_slashing, self.spec)?;
Ok(vec![set_1, set_2])
})
.collect::<SignatureSetResult<Vec<Vec<SignatureSet>>>>()?
.into_iter()
.flatten()
.collect();
self.sets.append(&mut sets);
Ok(())
}
/// Includes all signatures in `self.block.body.attester_slashings` for verification.
fn include_attester_slashings(&mut self) -> Result<()> {
self.block
.body
.attester_slashings
.iter()
.try_for_each(|attester_slashing| {
let (set_1, set_2) =
attester_slashing_signature_sets(&self.state, attester_slashing, &self.spec)?;
self.sets.push(set_1);
self.sets.push(set_2);
Ok(())
})
}
/// Includes all signatures in `self.block.body.attestations` for verification.
fn include_attestations(&mut self) -> Result<Vec<IndexedAttestation<T>>> {
self.block
.body
.attestations
.iter()
.map(|attestation| {
let indexed_attestation = get_indexed_attestation(self.state, attestation)?;
self.sets.push(indexed_attestation_signature_set(
&self.state,
&attestation.signature,
&indexed_attestation,
&self.spec,
)?);
Ok(indexed_attestation)
})
.collect::<Result<_>>()
.map_err(Into::into)
}
/// Includes all signatures in `self.block.body.voluntary_exits` for verification.
fn include_exits(&mut self) -> Result<()> {
let mut sets = self
.block
.body
.voluntary_exits
.iter()
.map(|exit| exit_signature_set(&self.state, exit, &self.spec))
.collect::<SignatureSetResult<_>>()?;
self.sets.append(&mut sets);
Ok(())
}
/// Includes all signatures in `self.block.body.transfers` for verification.
fn include_transfers(&mut self) -> Result<()> {
let mut sets = self
.block
.body
.transfers
.iter()
.map(|transfer| transfer_signature_set(&self.state, transfer, &self.spec))
.collect::<SignatureSetResult<_>>()?;
self.sets.append(&mut sets);
Ok(())
}
}
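
A minimal external-usage sketch; the wrapper function is hypothetical, while `verify_entire_block` and its signature are taken from this file.

use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec};

// Returns true when every signature in the block (deposits excepted) verifies in
// one batched pass; on failure there is no indication of which set was invalid.
fn signatures_are_valid<T: EthSpec>(
    state: &BeaconState<T>,
    block: &BeaconBlock<T>,
    spec: &ChainSpec,
) -> bool {
    BlockSignatureVerifier::verify_entire_block(state, block, spec).is_ok()
}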

View File

@ -1,42 +1,87 @@
use super::signature_sets::Error as SignatureSetError;
use types::*;
macro_rules! impl_from_beacon_state_error {
($type: ident) => {
impl From<BeaconStateError> for $type {
fn from(e: BeaconStateError) -> $type {
$type::BeaconStateError(e)
}
}
};
/// The error returned from the `per_block_processing` function. Indicates that a block is either
/// invalid, or we were unable to determine its validity (we encountered an unexpected error).
///
/// Any of the `...Error` variants indicate that at some point during block (and block operation)
/// verification, there was an error. There is no indication as to _where_ that error happened
/// (e.g., when processing attestations instead of when processing deposits).
#[derive(Debug, PartialEq)]
pub enum BlockProcessingError {
RandaoSignatureInvalid,
BulkSignatureVerificationFailed,
StateRootMismatch,
DepositCountInvalid {
expected: usize,
found: usize,
},
DuplicateTransfers {
duplicates: usize,
},
HeaderInvalid {
reason: HeaderInvalid,
},
ProposerSlashingInvalid {
index: usize,
reason: ProposerSlashingInvalid,
},
AttesterSlashingInvalid {
index: usize,
reason: AttesterSlashingInvalid,
},
IndexedAttestationInvalid {
index: usize,
reason: IndexedAttestationInvalid,
},
AttestationInvalid {
index: usize,
reason: AttestationInvalid,
},
DepositInvalid {
index: usize,
reason: DepositInvalid,
},
ExitInvalid {
index: usize,
reason: ExitInvalid,
},
TransferInvalid {
index: usize,
reason: TransferInvalid,
},
BeaconStateError(BeaconStateError),
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
}
macro_rules! impl_into_with_index_with_beacon_error {
($error_type: ident, $invalid_type: ident) => {
impl IntoWithIndex<BlockProcessingError> for $error_type {
fn into_with_index(self, i: usize) -> BlockProcessingError {
match self {
$error_type::Invalid(e) => {
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
}
$error_type::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
}
}
}
};
impl From<BeaconStateError> for BlockProcessingError {
fn from(e: BeaconStateError) -> Self {
BlockProcessingError::BeaconStateError(e)
}
}
macro_rules! impl_into_with_index_without_beacon_error {
($error_type: ident, $invalid_type: ident) => {
impl IntoWithIndex<BlockProcessingError> for $error_type {
fn into_with_index(self, i: usize) -> BlockProcessingError {
match self {
$error_type::Invalid(e) => {
BlockProcessingError::Invalid(BlockInvalid::$invalid_type(i, e))
}
}
}
impl From<SignatureSetError> for BlockProcessingError {
fn from(e: SignatureSetError) -> Self {
BlockProcessingError::SignatureSetError(e)
}
}
impl From<ssz_types::Error> for BlockProcessingError {
fn from(error: ssz_types::Error) -> Self {
BlockProcessingError::SszTypesError(error)
}
}
impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
fn from(e: BlockOperationError<HeaderInvalid>) -> BlockProcessingError {
match e {
BlockOperationError::Invalid(reason) => BlockProcessingError::HeaderInvalid { reason },
BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
}
};
}
}
/// A conversion that consumes `self` and adds an `index` variable to the resulting struct.
@ -48,81 +93,117 @@ pub trait IntoWithIndex<T>: Sized {
fn into_with_index(self, index: usize) -> T;
}
/*
* Block Validation
*/
macro_rules! impl_into_block_processing_error_with_index {
($($type: ident),*) => {
$(
impl IntoWithIndex<BlockProcessingError> for BlockOperationError<$type> {
fn into_with_index(self, index: usize) -> BlockProcessingError {
match self {
BlockOperationError::Invalid(reason) => BlockProcessingError::$type {
index,
reason
},
BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
}
}
}
)*
};
}
impl_into_block_processing_error_with_index!(
ProposerSlashingInvalid,
AttesterSlashingInvalid,
IndexedAttestationInvalid,
AttestationInvalid,
DepositInvalid,
ExitInvalid,
TransferInvalid
);
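A minimal sketch of how these conversions are typically used (the `process_exits_sketch` function and its surrounding imports are assumptions for illustration, not code from this change):
fn process_exits_sketch<T: EthSpec>(
    state: &BeaconState<T>,
    exits: &[VoluntaryExit],
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    exits.iter().enumerate().try_for_each(|(i, exit)| {
        // A `BlockOperationError<ExitInvalid>` becomes
        // `BlockProcessingError::ExitInvalid { index, reason }`.
        verify_exit(state, exit, VerifySignatures::True, spec).map_err(|e| e.into_with_index(i))
    })
}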
pub type HeaderValidationError = BlockOperationError<HeaderInvalid>;
pub type AttesterSlashingValidationError = BlockOperationError<AttesterSlashingInvalid>;
pub type ProposerSlashingValidationError = BlockOperationError<ProposerSlashingInvalid>;
pub type AttestationValidationError = BlockOperationError<AttestationInvalid>;
pub type DepositValidationError = BlockOperationError<DepositInvalid>;
pub type ExitValidationError = BlockOperationError<ExitInvalid>;
pub type TransferValidationError = BlockOperationError<TransferInvalid>;
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum BlockProcessingError {
/// Validation completed successfully and the object is invalid.
Invalid(BlockInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
pub enum BlockOperationError<T> {
Invalid(T),
BeaconStateError(BeaconStateError),
/// Encountered an `ssz_types::Error` whilst attempting to determine validity.
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
}
impl_from_beacon_state_error!(BlockProcessingError);
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum BlockInvalid {
StateSlotMismatch,
ParentBlockRootMismatch {
state: Hash256,
block: Hash256,
},
ProposerSlashed(usize),
BadSignature,
BadRandaoSignature,
MaxAttestationsExceeded,
MaxAttesterSlashingsExceed,
MaxProposerSlashingsExceeded,
DepositCountInvalid,
DuplicateTransfers,
MaxExitsExceeded,
MaxTransfersExceed,
AttestationInvalid(usize, AttestationInvalid),
/// An `IndexedAttestation` inside an `AttesterSlashing` was invalid.
///
/// To determine the offending `AttesterSlashing` index, divide the attached `usize` by two.
IndexedAttestationInvalid(usize, IndexedAttestationInvalid),
AttesterSlashingInvalid(usize, AttesterSlashingInvalid),
ProposerSlashingInvalid(usize, ProposerSlashingInvalid),
DepositInvalid(usize, DepositInvalid),
// TODO: merge this into the `DepositInvalid` error.
DepositProcessingFailed(usize),
ExitInvalid(usize, ExitInvalid),
TransferInvalid(usize, TransferInvalid),
// NOTE: this is only used in tests; normally a state root mismatch is handled
// in the beacon_chain rather than in state_processing
StateRootMismatch,
impl<T> BlockOperationError<T> {
pub fn invalid(reason: T) -> BlockOperationError<T> {
BlockOperationError::Invalid(reason)
}
}
impl From<ssz_types::Error> for BlockProcessingError {
impl<T> From<BeaconStateError> for BlockOperationError<T> {
fn from(e: BeaconStateError) -> Self {
BlockOperationError::BeaconStateError(e)
}
}
impl<T> From<SignatureSetError> for BlockOperationError<T> {
fn from(e: SignatureSetError) -> Self {
BlockOperationError::SignatureSetError(e)
}
}
impl<T> From<ssz_types::Error> for BlockOperationError<T> {
fn from(error: ssz_types::Error) -> Self {
BlockProcessingError::SszTypesError(error)
BlockOperationError::SszTypesError(error)
}
}
impl Into<BlockProcessingError> for BlockInvalid {
fn into(self) -> BlockProcessingError {
BlockProcessingError::Invalid(self)
}
}
/*
* Attestation Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(AttestationInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
pub enum HeaderInvalid {
ProposalSignatureInvalid,
StateSlotMismatch,
ParentBlockRootMismatch { state: Hash256, block: Hash256 },
ProposerSlashed(usize),
}
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingInvalid {
/// The proposer index is not a known validator.
ProposerUnknown(u64),
/// The two proposals have different epochs.
///
/// (proposal_1_slot, proposal_2_slot)
ProposalEpochMismatch(Slot, Slot),
/// The proposals are identical and therefore not slashable.
ProposalsIdentical,
/// The specified proposer cannot be slashed because they are already slashed, or not active.
ProposerNotSlashable(u64),
/// The first proposal signature was invalid.
BadProposal1Signature,
/// The second proposal signature was invalid.
BadProposal2Signature,
}
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingInvalid {
/// The attestation data is identical, an attestation cannot conflict with itself.
AttestationDataIdentical,
/// The attestations were not in conflict.
NotSlashable,
/// The first `IndexedAttestation` was invalid.
IndexedAttestation1Invalid(BlockOperationError<IndexedAttestationInvalid>),
/// The second `IndexedAttestation` was invalid.
IndexedAttestation2Invalid(BlockOperationError<IndexedAttestationInvalid>),
/// The validator index is unknown. One cannot slash one who does not exist.
UnknownValidator(u64),
/// The specified validator has already been withdrawn.
ValidatorAlreadyWithdrawn(u64),
/// There were no indices able to be slashed.
NoSlashableIndices,
}
/// Describes why an object is invalid.
@ -186,69 +267,21 @@ pub enum AttestationInvalid {
BadIndexedAttestation(IndexedAttestationInvalid),
}
impl_from_beacon_state_error!(AttestationValidationError);
impl_into_with_index_with_beacon_error!(AttestationValidationError, AttestationInvalid);
impl From<IndexedAttestationValidationError> for AttestationValidationError {
fn from(err: IndexedAttestationValidationError) -> Self {
let IndexedAttestationValidationError::Invalid(e) = err;
AttestationValidationError::Invalid(AttestationInvalid::BadIndexedAttestation(e))
impl From<BlockOperationError<IndexedAttestationInvalid>>
for BlockOperationError<AttestationInvalid>
{
fn from(e: BlockOperationError<IndexedAttestationInvalid>) -> Self {
match e {
BlockOperationError::Invalid(e) => {
BlockOperationError::invalid(AttestationInvalid::BadIndexedAttestation(e))
}
BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e),
}
}
}
impl From<ssz_types::Error> for AttestationValidationError {
fn from(error: ssz_types::Error) -> Self {
Self::from(IndexedAttestationValidationError::from(error))
}
}
/*
* `AttesterSlashing` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(AttesterSlashingInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum AttesterSlashingInvalid {
/// The attestation data is identical, an attestation cannot conflict with itself.
AttestationDataIdentical,
/// The attestations were not in conflict.
NotSlashable,
/// The first `IndexedAttestation` was invalid.
IndexedAttestation1Invalid(IndexedAttestationInvalid),
/// The second `IndexedAttestation` was invalid.
IndexedAttestation2Invalid(IndexedAttestationInvalid),
/// The validator index is unknown. One cannot slash one who does not exist.
UnknownValidator(u64),
/// The specified validator has already been withdrawn.
ValidatorAlreadyWithdrawn(u64),
/// There were no indices able to be slashed.
NoSlashableIndices,
}
impl_from_beacon_state_error!(AttesterSlashingValidationError);
impl_into_with_index_with_beacon_error!(AttesterSlashingValidationError, AttesterSlashingInvalid);
/*
* `IndexedAttestation` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum IndexedAttestationValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(IndexedAttestationInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum IndexedAttestationInvalid {
/// The custody bit 0 validators intersect with the bit 1 validators.
@ -271,106 +304,24 @@ pub enum IndexedAttestationInvalid {
UnknownValidator(u64),
/// The indexed attestation aggregate signature was not valid.
BadSignature,
/// There was an error whilst attempting to get a set of signatures. The signatures may have
/// been invalid or an internal error occurred.
SignatureSetError(SignatureSetError),
}
impl Into<IndexedAttestationInvalid> for IndexedAttestationValidationError {
fn into(self) -> IndexedAttestationInvalid {
match self {
IndexedAttestationValidationError::Invalid(e) => e,
}
}
}
impl From<ssz_types::Error> for IndexedAttestationValidationError {
fn from(error: ssz_types::Error) -> Self {
IndexedAttestationValidationError::Invalid(
IndexedAttestationInvalid::CustodyBitfieldBoundsError(error),
)
}
}
impl_into_with_index_without_beacon_error!(
IndexedAttestationValidationError,
IndexedAttestationInvalid
);
/*
* `ProposerSlashing` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(ProposerSlashingInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum ProposerSlashingInvalid {
/// The proposer index is not a known validator.
ProposerUnknown(u64),
/// The two proposals have different epochs.
///
/// (proposal_1_slot, proposal_2_slot)
ProposalEpochMismatch(Slot, Slot),
/// The proposals are identical and therefore not slashable.
ProposalsIdentical,
/// The specified proposer cannot be slashed because they are already slashed, or not active.
ProposerNotSlashable(u64),
/// The first proposal signature was invalid.
BadProposal1Signature,
/// The second proposal signature was invalid.
BadProposal2Signature,
}
impl_into_with_index_without_beacon_error!(
ProposerSlashingValidationError,
ProposerSlashingInvalid
);
/*
* `Deposit` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum DepositValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(DepositInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum DepositInvalid {
/// The deposit index does not match the state index.
BadIndex { state: u64, deposit: u64 },
/// The signature (proof-of-possession) does not match the given pubkey.
BadSignature,
/// The signature does not represent a valid BLS signature.
BadSignatureBytes,
/// The signature or pubkey does not represent a valid BLS point.
BadBlsBytes,
/// The specified `branch` and `index` did not form a valid proof that the deposit is included
/// in the eth1 deposit root.
BadMerkleProof,
}
impl_from_beacon_state_error!(DepositValidationError);
impl_into_with_index_with_beacon_error!(DepositValidationError, DepositInvalid);
/*
* `Exit` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum ExitValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(ExitInvalid),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum ExitInvalid {
/// The specified validator is not active.
@ -390,24 +341,11 @@ pub enum ExitInvalid {
},
/// The exit signature was not signed by the validator.
BadSignature,
/// There was an error whilst attempting to get a set of signatures. The signatures may have
/// been invalid or an internal error occurred.
SignatureSetError(SignatureSetError),
}
impl_into_with_index_without_beacon_error!(ExitValidationError, ExitInvalid);
/*
* `Transfer` Validation
*/
/// The object is invalid or validation failed.
#[derive(Debug, PartialEq)]
pub enum TransferValidationError {
/// Validation completed successfully and the object is invalid.
Invalid(TransferInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
}
/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
pub enum TransferInvalid {
/// The validator indicated by `transfer.from` is unknown.
@ -460,6 +398,3 @@ pub enum TransferInvalid {
/// (proposer_balance, transfer_fee)
ProposerBalanceOverflow(u64, u64),
}
impl_from_beacon_state_error!(TransferValidationError);
impl_into_with_index_with_beacon_error!(TransferValidationError, TransferInvalid);

View File

@ -1,42 +1,25 @@
use super::errors::{
IndexedAttestationInvalid as Invalid, IndexedAttestationValidationError as Error,
};
use super::errors::{BlockOperationError, IndexedAttestationInvalid as Invalid};
use super::signature_sets::indexed_attestation_signature_set;
use crate::VerifySignatures;
use std::collections::HashSet;
use std::iter::FromIterator;
use tree_hash::TreeHash;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
fn error(reason: Invalid) -> BlockOperationError<Invalid> {
BlockOperationError::invalid(reason)
}
/// Verify an `IndexedAttestation`.
///
/// Spec v0.8.0
pub fn is_valid_indexed_attestation<T: EthSpec>(
state: &BeaconState<T>,
indexed_attestation: &IndexedAttestation<T>,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, true)
}
/// Verify but don't check the signature.
///
/// Spec v0.8.0
pub fn is_valid_indexed_attestation_without_signature<T: EthSpec>(
state: &BeaconState<T>,
indexed_attestation: &IndexedAttestation<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, false)
}
/// Optionally check the signature.
///
/// Spec v0.8.0
fn is_valid_indexed_attestation_parametric<T: EthSpec>(
state: &BeaconState<T>,
indexed_attestation: &IndexedAttestation<T>,
spec: &ChainSpec,
verify_signature: bool,
) -> Result<(), Error> {
) -> Result<()> {
let bit_0_indices = &indexed_attestation.custody_bit_0_indices;
let bit_1_indices = &indexed_attestation.custody_bit_1_indices;
@ -59,10 +42,10 @@ fn is_valid_indexed_attestation_parametric<T: EthSpec>(
);
// Check that both vectors of indices are sorted
let check_sorted = |list: &[u64]| -> Result<(), Error> {
let check_sorted = |list: &[u64]| -> Result<()> {
list.windows(2).enumerate().try_for_each(|(i, pair)| {
if pair[0] >= pair[1] {
invalid!(Invalid::BadValidatorIndicesOrdering(i));
return Err(error(Invalid::BadValidatorIndicesOrdering(i)));
} else {
Ok(())
}
@ -72,74 +55,18 @@ fn is_valid_indexed_attestation_parametric<T: EthSpec>(
check_sorted(&bit_0_indices)?;
check_sorted(&bit_1_indices)?;
if verify_signature {
is_valid_indexed_attestation_signature(state, indexed_attestation, spec)?;
if verify_signatures.is_true() {
verify!(
indexed_attestation_signature_set(
state,
&indexed_attestation.signature,
&indexed_attestation,
spec
)?
.is_valid(),
Invalid::BadSignature
);
}
Ok(())
}
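A short usage sketch (an assumed call site, with `state`, `indexed_attestation` and `spec` in scope) of the signature-aware entry point:
match is_valid_indexed_attestation(&state, &indexed_attestation, VerifySignatures::True, &spec) {
    // Structurally valid and the aggregate signature verified.
    Ok(()) => {}
    // Invalid; the reason is an `IndexedAttestationInvalid`.
    Err(BlockOperationError::Invalid(_reason)) => {}
    // Validity could not be determined (e.g. a `BeaconStateError`).
    Err(_other) => {}
}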
/// Create an aggregate public key for a list of validators, failing if any key can't be found.
fn create_aggregate_pubkey<'a, T, I>(
state: &BeaconState<T>,
validator_indices: I,
) -> Result<AggregatePublicKey, Error>
where
I: IntoIterator<Item = &'a u64>,
T: EthSpec,
{
validator_indices.into_iter().try_fold(
AggregatePublicKey::new(),
|mut aggregate_pubkey, &validator_idx| {
state
.validators
.get(validator_idx as usize)
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(validator_idx)))
.map(|validator| {
aggregate_pubkey.add(&validator.pubkey);
aggregate_pubkey
})
},
)
}
/// Verify the signature of an IndexedAttestation.
///
/// Spec v0.8.0
fn is_valid_indexed_attestation_signature<T: EthSpec>(
state: &BeaconState<T>,
indexed_attestation: &IndexedAttestation<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
let bit_0_pubkey = create_aggregate_pubkey(state, &indexed_attestation.custody_bit_0_indices)?;
let bit_1_pubkey = create_aggregate_pubkey(state, &indexed_attestation.custody_bit_1_indices)?;
let message_0 = AttestationDataAndCustodyBit {
data: indexed_attestation.data.clone(),
custody_bit: false,
}
.tree_hash_root();
let message_1 = AttestationDataAndCustodyBit {
data: indexed_attestation.data.clone(),
custody_bit: true,
}
.tree_hash_root();
let messages = vec![&message_0[..], &message_1[..]];
let keys = vec![&bit_0_pubkey, &bit_1_pubkey];
let domain = spec.get_domain(
indexed_attestation.data.target.epoch,
Domain::Attestation,
&state.fork,
);
verify!(
indexed_attestation
.signature
.verify_multiple(&messages[..], domain, &keys[..]),
Invalid::BadSignature
);
Ok(())
}

View File

@ -0,0 +1,282 @@
//! A `SignatureSet` is an abstraction over the components of a signature. A `SignatureSet` may be
//! validated individually, or alongside others in a potentially cheaper bulk operation.
//!
//! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`.
use bls::SignatureSet;
use std::convert::TryInto;
use tree_hash::{SignedRoot, TreeHash};
use types::{
AggregateSignature, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock,
BeaconBlockHeader, BeaconState, BeaconStateError, ChainSpec, Deposit, Domain, EthSpec, Fork,
Hash256, IndexedAttestation, ProposerSlashing, PublicKey, RelativeEpoch, Signature, Transfer,
VoluntaryExit,
};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, PartialEq)]
pub enum Error {
/// Signature verification failed. The block is invalid.
SignatureInvalid,
/// There was an error attempting to read from a `BeaconState`. Block
/// validity was not determined.
BeaconStateError(BeaconStateError),
/// Attempted to find the public key of a validator that does not exist. You cannot distinguish
/// between an error and an invalid block in this case.
ValidatorUnknown(u64),
/// The public keys supplied do not match the number of objects requiring keys. Block validity
/// was not determined.
MismatchedPublicKeyLen { pubkey_len: usize, other_len: usize },
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Error {
Error::BeaconStateError(e)
}
}
/// A signature set that is valid if a block was signed by the expected block producer.
pub fn block_proposal_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
block: &'a BeaconBlock<T>,
block_signed_root: Option<Hash256>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let block_proposer = &state.validators
[state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
let domain = spec.get_domain(
block.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer,
&state.fork,
);
let message = if let Some(root) = block_signed_root {
root.as_bytes().to_vec()
} else {
block.signed_root()
};
Ok(SignatureSet::single(
&block.signature,
&block_proposer.pubkey,
message,
domain,
))
}
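A hedged sketch of verifying this set on its own (a bulk verifier would instead collect many sets before checking them together); the helper function below is an assumption for illustration:
fn check_block_proposal_sketch<'a, T: EthSpec>(
    state: &'a BeaconState<T>,
    block: &'a BeaconBlock<T>,
    spec: &'a ChainSpec,
) -> Result<()> {
    // `None` means "hash the block to find its signed root"; passing a
    // pre-computed root avoids re-hashing.
    let set = block_proposal_signature_set(state, block, None, spec)?;
    if set.is_valid() {
        Ok(())
    } else {
        Err(Error::SignatureInvalid)
    }
}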
/// A signature set that is valid if the block proposer's randao reveal signature is correct.
pub fn randao_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
block: &'a BeaconBlock<T>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let block_proposer = &state.validators
[state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
let domain = spec.get_domain(
block.slot.epoch(T::slots_per_epoch()),
Domain::Randao,
&state.fork,
);
let message = state.current_epoch().tree_hash_root();
Ok(SignatureSet::single(
&block.body.randao_reveal,
&block_proposer.pubkey,
message,
domain,
))
}
/// Returns two signature sets, one for each `BlockHeader` included in the `ProposerSlashing`.
pub fn proposer_slashing_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
proposer_slashing: &'a ProposerSlashing,
spec: &'a ChainSpec,
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> {
let proposer = state
.validators
.get(proposer_slashing.proposer_index as usize)
.ok_or_else(|| Error::ValidatorUnknown(proposer_slashing.proposer_index))?;
Ok((
block_header_signature_set(state, &proposer_slashing.header_1, &proposer.pubkey, spec)?,
block_header_signature_set(state, &proposer_slashing.header_2, &proposer.pubkey, spec)?,
))
}
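A brief sketch (assumed call site, inside a function returning this module's `Result`) showing that both headers must verify:
let (set_1, set_2) = proposer_slashing_signature_set(&state, &proposer_slashing, &spec)?;
let both_headers_signed_by_proposer = set_1.is_valid() && set_2.is_valid();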
/// Returns a signature set that is valid if the given `pubkey` signed the `header`.
fn block_header_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
header: &'a BeaconBlockHeader,
pubkey: &'a PublicKey,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let domain = spec.get_domain(
header.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer,
&state.fork,
);
let message = header.signed_root();
Ok(SignatureSet::single(
&header.signature,
pubkey,
message,
domain,
))
}
/// Returns the signature set for the given `indexed_attestation`.
pub fn indexed_attestation_signature_set<'a, 'b, T: EthSpec>(
state: &'a BeaconState<T>,
signature: &'a AggregateSignature,
indexed_attestation: &'b IndexedAttestation<T>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let message_0 = AttestationDataAndCustodyBit {
data: indexed_attestation.data.clone(),
custody_bit: false,
}
.tree_hash_root();
let message_1 = AttestationDataAndCustodyBit {
data: indexed_attestation.data.clone(),
custody_bit: true,
}
.tree_hash_root();
let domain = spec.get_domain(
indexed_attestation.data.target.epoch,
Domain::Attestation,
&state.fork,
);
Ok(SignatureSet::dual(
signature,
message_0,
get_pubkeys(state, &indexed_attestation.custody_bit_0_indices)?,
message_1,
get_pubkeys(state, &indexed_attestation.custody_bit_1_indices)?,
domain,
))
}
/// Returns the signature set for the given `attester_slashing` and corresponding `pubkeys`.
pub fn attester_slashing_signature_sets<'a, T: EthSpec>(
state: &'a BeaconState<T>,
attester_slashing: &'a AttesterSlashing<T>,
spec: &'a ChainSpec,
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> {
Ok((
indexed_attestation_signature_set(
state,
&attester_slashing.attestation_1.signature,
&attester_slashing.attestation_1,
spec,
)?,
indexed_attestation_signature_set(
state,
&attester_slashing.attestation_2.signature,
&attester_slashing.attestation_2,
spec,
)?,
))
}
/// Returns the BLS values in a `Deposit`, if they're all valid. Otherwise, returns `None`.
///
/// This method is separate from `deposit_signature_set` to satisfy lifetime requirements.
pub fn deposit_pubkey_signature_message(
deposit: &Deposit,
) -> Option<(PublicKey, Signature, Vec<u8>)> {
let pubkey = (&deposit.data.pubkey).try_into().ok()?;
let signature = (&deposit.data.signature).try_into().ok()?;
let message = deposit.data.signed_root();
Some((pubkey, signature, message))
}
/// Returns the signature set for a deposit, built from the `(PublicKey, Signature, Vec<u8>)`
/// tuple produced by `deposit_pubkey_signature_message`.
pub fn deposit_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
pubkey_signature_message: &'a (PublicKey, Signature, Vec<u8>),
spec: &'a ChainSpec,
) -> SignatureSet<'a> {
let (pubkey, signature, message) = pubkey_signature_message;
// Note: Deposits are valid across forks, thus the deposit domain is computed
// with the fork zeroed.
let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default());
SignatureSet::single(signature, pubkey, message.clone(), domain)
}
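A sketch of the two-step pattern this separation enables (an assumed call site): the owned tuple is bound first so it outlives the `SignatureSet` that borrows from it.
let deposit_signature_is_valid = match deposit_pubkey_signature_message(&deposit) {
    // `None` indicates the deposit's pubkey or signature bytes were not valid BLS points.
    None => false,
    Some(psm) => deposit_signature_set(&state, &psm, &spec).is_valid(),
};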
/// Returns a signature set that is valid if the `VoluntaryExit` was signed by the indicated
/// validator.
pub fn exit_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
exit: &'a VoluntaryExit,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let validator = state
.validators
.get(exit.validator_index as usize)
.ok_or_else(|| Error::ValidatorUnknown(exit.validator_index))?;
let domain = spec.get_domain(exit.epoch, Domain::VoluntaryExit, &state.fork);
let message = exit.signed_root();
Ok(SignatureSet::single(
&exit.signature,
&validator.pubkey,
message,
domain,
))
}
/// Returns a signature set that is valid if the `Transfer` was signed by `transfer.pubkey`.
pub fn transfer_signature_set<'a, T: EthSpec>(
state: &'a BeaconState<T>,
transfer: &'a Transfer,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let domain = spec.get_domain(
transfer.slot.epoch(T::slots_per_epoch()),
Domain::Transfer,
&state.fork,
);
let message = transfer.signed_root();
Ok(SignatureSet::single(
&transfer.signature,
&transfer.pubkey,
message,
domain,
))
}
/// Maps validator indices to public keys.
fn get_pubkeys<'a, 'b, T, I>(
state: &'a BeaconState<T>,
validator_indices: I,
) -> Result<Vec<&'a PublicKey>>
where
I: IntoIterator<Item = &'b u64>,
T: EthSpec,
{
validator_indices
.into_iter()
.map(|&validator_idx| {
state
.validators
.get(validator_idx as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_idx))
.map(|validator| &validator.pubkey)
})
.collect()
}

View File

@ -1,7 +1,7 @@
#![cfg(all(test, not(feature = "fake_crypto")))]
use super::block_processing_builder::BlockProcessingBuilder;
use super::errors::*;
use crate::per_block_processing;
use crate::{per_block_processing, BlockSignatureStrategy};
use tree_hash::SignedRoot;
use types::*;
@ -13,7 +13,13 @@ fn valid_block_ok() {
let builder = get_builder(&spec);
let (block, mut state) = builder.build(None, None, &spec);
let result = per_block_processing(&mut state, &block, &spec);
let result = per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
);
assert_eq!(result, Ok(()));
}
@ -27,13 +33,19 @@ fn invalid_block_header_state_slot() {
state.slot = Slot::new(133713);
block.slot = Slot::new(424242);
let result = per_block_processing(&mut state, &block, &spec);
let result = per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
);
assert_eq!(
result,
Err(BlockProcessingError::Invalid(
BlockInvalid::StateSlotMismatch
))
Err(BlockProcessingError::HeaderInvalid {
reason: HeaderInvalid::StateSlotMismatch
})
);
}
@ -44,16 +56,22 @@ fn invalid_parent_block_root() {
let invalid_parent_root = Hash256::from([0xAA; 32]);
let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec);
let result = per_block_processing(&mut state, &block, &spec);
let result = per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
);
assert_eq!(
result,
Err(BlockProcessingError::Invalid(
BlockInvalid::ParentBlockRootMismatch {
Err(BlockProcessingError::HeaderInvalid {
reason: HeaderInvalid::ParentBlockRootMismatch {
state: Hash256::from_slice(&state.latest_block_header.signed_root()),
block: block.parent_root
}
))
})
);
}
@ -71,12 +89,20 @@ fn invalid_block_signature() {
block.signature = Signature::new(&message, domain, &keypair.sk);
// process block with invalid block signature
let result = per_block_processing(&mut state, &block, &spec);
let result = per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
);
// should get a BadSignature error
assert_eq!(
result,
Err(BlockProcessingError::Invalid(BlockInvalid::BadSignature))
Err(BlockProcessingError::HeaderInvalid {
reason: HeaderInvalid::ProposalSignatureInvalid
})
);
}
@ -89,15 +115,16 @@ fn invalid_randao_reveal_signature() {
let keypair = Keypair::random();
let (block, mut state) = builder.build(Some(keypair.sk), None, &spec);
let result = per_block_processing(&mut state, &block, &spec);
let result = per_block_processing(
&mut state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
&spec,
);
// should get a BadRandaoSignature error
assert_eq!(
result,
Err(BlockProcessingError::Invalid(
BlockInvalid::BadRandaoSignature
))
);
assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid));
}
fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder<MainnetEthSpec>) {

View File

@ -1,12 +1,16 @@
use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error};
use super::errors::{AttestationInvalid as Invalid, BlockOperationError};
use super::VerifySignatures;
use crate::common::get_indexed_attestation;
use crate::per_block_processing::{
is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature,
};
use crate::per_block_processing::is_valid_indexed_attestation;
use tree_hash::TreeHash;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
fn error(reason: Invalid) -> BlockOperationError<Invalid> {
BlockOperationError::invalid(reason)
}
/// Returns `Ok(())` if the given `attestation` is valid to be included in a block that is applied
/// to `state`. Otherwise, returns a descriptive `Err`.
///
@ -16,9 +20,9 @@ use types::*;
pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
state: &BeaconState<T>,
attestation: &Attestation<T>,
spec: &ChainSpec,
verify_signatures: VerifySignatures,
) -> Result<(), Error> {
spec: &ChainSpec,
) -> Result<()> {
let data = &attestation.data;
// Check attestation slot.
@ -40,7 +44,7 @@ pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
}
);
verify_attestation_for_state(state, attestation, spec, verify_signatures)
verify_attestation_for_state(state, attestation, verify_signatures, spec)
}
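As a hedged example (assumed to live in a loop over `block.body.attestations`, inside a function returning `Result<(), BlockProcessingError>`):
verify_attestation_for_block_inclusion(&state, &attestation, VerifySignatures::True, &spec)
    // Attach the attestation's position so the block-level error names the offender.
    .map_err(|e| e.into_with_index(i))?;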
/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given
@ -53,9 +57,9 @@ pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
pub fn verify_attestation_for_state<T: EthSpec>(
state: &BeaconState<T>,
attestation: &Attestation<T>,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
verify_signature: VerifySignatures,
) -> Result<(), Error> {
) -> Result<()> {
let data = &attestation.data;
verify!(
data.crosslink.shard < T::ShardCount::to_u64(),
@ -90,11 +94,7 @@ pub fn verify_attestation_for_state<T: EthSpec>(
// Check signature and bitfields
let indexed_attestation = get_indexed_attestation(state, attestation)?;
if verify_signature == VerifySignatures::True {
is_valid_indexed_attestation(state, &indexed_attestation, spec)?;
} else {
is_valid_indexed_attestation_without_signature(state, &indexed_attestation, spec)?;
}
is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?;
Ok(())
}
@ -107,7 +107,7 @@ pub fn verify_attestation_for_state<T: EthSpec>(
fn verify_casper_ffg_vote<'a, T: EthSpec>(
attestation: &Attestation<T>,
state: &'a BeaconState<T>,
) -> Result<&'a Crosslink, Error> {
) -> Result<&'a Crosslink> {
let data = &attestation.data;
if data.target.epoch == state.current_epoch() {
verify!(
@ -130,6 +130,6 @@ fn verify_casper_ffg_vote<'a, T: EthSpec>(
);
Ok(state.get_previous_crosslink(data.crosslink.shard)?)
} else {
invalid!(Invalid::BadTargetEpoch)
return Err(error(Invalid::BadTargetEpoch));
}
}

View File

@ -1,8 +1,15 @@
use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error};
use super::errors::{AttesterSlashingInvalid as Invalid, BlockOperationError};
use super::is_valid_indexed_attestation::is_valid_indexed_attestation;
use crate::per_block_processing::VerifySignatures;
use std::collections::BTreeSet;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
fn error(reason: Invalid) -> BlockOperationError<Invalid> {
BlockOperationError::invalid(reason)
}
/// Indicates if an `AttesterSlashing` is valid to be included in a block in the current epoch of the given
/// state.
///
@ -13,8 +20,9 @@ pub fn verify_attester_slashing<T: EthSpec>(
state: &BeaconState<T>,
attester_slashing: &AttesterSlashing<T>,
should_verify_indexed_attestations: bool,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<()> {
let attestation_1 = &attester_slashing.attestation_1;
let attestation_2 = &attester_slashing.attestation_2;
@ -26,10 +34,10 @@ pub fn verify_attester_slashing<T: EthSpec>(
);
if should_verify_indexed_attestations {
is_valid_indexed_attestation(state, &attestation_1, spec)
.map_err(|e| Error::Invalid(Invalid::IndexedAttestation1Invalid(e.into())))?;
is_valid_indexed_attestation(state, &attestation_2, spec)
.map_err(|e| Error::Invalid(Invalid::IndexedAttestation2Invalid(e.into())))?;
is_valid_indexed_attestation(state, &attestation_1, verify_signatures, spec)
.map_err(|e| error(Invalid::IndexedAttestation1Invalid(e)))?;
is_valid_indexed_attestation(state, &attestation_2, verify_signatures, spec)
.map_err(|e| error(Invalid::IndexedAttestation2Invalid(e)))?;
}
Ok(())
@ -43,7 +51,7 @@ pub fn verify_attester_slashing<T: EthSpec>(
pub fn get_slashable_indices<T: EthSpec>(
state: &BeaconState<T>,
attester_slashing: &AttesterSlashing<T>,
) -> Result<Vec<u64>, Error> {
) -> Result<Vec<u64>> {
get_slashable_indices_modular(state, attester_slashing, |_, validator| {
validator.is_slashable_at(state.current_epoch())
})
@ -55,7 +63,7 @@ pub fn get_slashable_indices_modular<F, T: EthSpec>(
state: &BeaconState<T>,
attester_slashing: &AttesterSlashing<T>,
is_slashable: F,
) -> Result<Vec<u64>, Error>
) -> Result<Vec<u64>>
where
F: Fn(u64, &Validator) -> bool,
{
@ -81,7 +89,7 @@ where
let validator = state
.validators
.get(index as usize)
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(index)))?;
.ok_or_else(|| error(Invalid::UnknownValidator(index)))?;
if is_slashable(index, validator) {
slashable_indices.push(index);

View File

@ -1,9 +1,17 @@
use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
use super::errors::{BlockOperationError, DepositInvalid};
use crate::per_block_processing::signature_sets::{
deposit_pubkey_signature_message, deposit_signature_set,
};
use merkle_proof::verify_merkle_proof;
use std::convert::TryInto;
use tree_hash::{SignedRoot, TreeHash};
use tree_hash::TreeHash;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<DepositInvalid>>;
fn error(reason: DepositInvalid) -> BlockOperationError<DepositInvalid> {
BlockOperationError::invalid(reason)
}
/// Verify `Deposit.pubkey` signed `Deposit.signature`.
///
/// Spec v0.8.0
@ -11,18 +19,13 @@ pub fn verify_deposit_signature<T: EthSpec>(
state: &BeaconState<T>,
deposit: &Deposit,
spec: &ChainSpec,
pubkey: &PublicKey,
) -> Result<(), Error> {
// Note: Deposits are valid across forks, thus the deposit domain is computed
// with the fork zeroed.
let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default());
let signature: Signature = (&deposit.data.signature)
.try_into()
.map_err(|_| Error::Invalid(Invalid::BadSignatureBytes))?;
) -> Result<()> {
let deposit_signature_message = deposit_pubkey_signature_message(deposit)
.ok_or_else(|| error(DepositInvalid::BadBlsBytes))?;
verify!(
signature.verify(&deposit.data.signed_root(), domain, pubkey),
Invalid::BadSignature
deposit_signature_set(state, &deposit_signature_message, spec).is_valid(),
DepositInvalid::BadSignature
);
Ok(())
@ -37,7 +40,7 @@ pub fn verify_deposit_signature<T: EthSpec>(
pub fn get_existing_validator_index<T: EthSpec>(
state: &BeaconState<T>,
pub_key: &PublicKey,
) -> Result<Option<u64>, Error> {
) -> Result<Option<u64>> {
let validator_index = state.get_validator_index(pub_key)?;
Ok(validator_index.map(|idx| idx as u64))
}
@ -53,7 +56,7 @@ pub fn verify_deposit_merkle_proof<T: EthSpec>(
deposit: &Deposit,
deposit_index: u64,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<()> {
let leaf = deposit.data.tree_hash_root();
verify!(
@ -64,7 +67,7 @@ pub fn verify_deposit_merkle_proof<T: EthSpec>(
deposit_index as usize,
state.eth1_data.deposit_root,
),
Invalid::BadMerkleProof
DepositInvalid::BadMerkleProof
);
Ok(())

View File

@ -1,7 +1,13 @@
use super::errors::{ExitInvalid as Invalid, ExitValidationError as Error};
use tree_hash::SignedRoot;
use super::errors::{BlockOperationError, ExitInvalid};
use crate::per_block_processing::{signature_sets::exit_signature_set, VerifySignatures};
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<ExitInvalid>>;
fn error(reason: ExitInvalid) -> BlockOperationError<ExitInvalid> {
BlockOperationError::invalid(reason)
}
/// Indicates if an `Exit` is valid to be included in a block in the current epoch of the given
/// state.
///
@ -11,9 +17,10 @@ use types::*;
pub fn verify_exit<T: EthSpec>(
state: &BeaconState<T>,
exit: &VoluntaryExit,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_exit_parametric(state, exit, spec, false)
) -> Result<()> {
verify_exit_parametric(state, exit, verify_signatures, spec, false)
}
/// Like `verify_exit` but doesn't run checks which may become true in future states.
@ -22,9 +29,10 @@ pub fn verify_exit<T: EthSpec>(
pub fn verify_exit_time_independent_only<T: EthSpec>(
state: &BeaconState<T>,
exit: &VoluntaryExit,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_exit_parametric(state, exit, spec, true)
) -> Result<()> {
verify_exit_parametric(state, exit, verify_signatures, spec, true)
}
/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
@ -33,30 +41,31 @@ pub fn verify_exit_time_independent_only<T: EthSpec>(
fn verify_exit_parametric<T: EthSpec>(
state: &BeaconState<T>,
exit: &VoluntaryExit,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
time_independent_only: bool,
) -> Result<(), Error> {
) -> Result<()> {
let validator = state
.validators
.get(exit.validator_index as usize)
.ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?;
.ok_or_else(|| error(ExitInvalid::ValidatorUnknown(exit.validator_index)))?;
// Verify the validator is active.
verify!(
validator.is_active_at(state.current_epoch()),
Invalid::NotActive(exit.validator_index)
ExitInvalid::NotActive(exit.validator_index)
);
// Verify that the validator has not yet exited.
verify!(
validator.exit_epoch == spec.far_future_epoch,
Invalid::AlreadyExited(exit.validator_index)
ExitInvalid::AlreadyExited(exit.validator_index)
);
// Exits must specify an epoch when they become valid; they are not valid before then.
verify!(
time_independent_only || state.current_epoch() >= exit.epoch,
Invalid::FutureEpoch {
ExitInvalid::FutureEpoch {
state: state.current_epoch(),
exit: exit.epoch
}
@ -65,20 +74,18 @@ fn verify_exit_parametric<T: EthSpec>(
// Verify the validator has been active long enough.
verify!(
state.current_epoch() >= validator.activation_epoch + spec.persistent_committee_period,
Invalid::TooYoungToExit {
ExitInvalid::TooYoungToExit {
current_epoch: state.current_epoch(),
earliest_exit_epoch: validator.activation_epoch + spec.persistent_committee_period,
}
);
// Verify signature.
let message = exit.signed_root();
let domain = spec.get_domain(exit.epoch, Domain::VoluntaryExit, &state.fork);
verify!(
exit.signature
.verify(&message[..], domain, &validator.pubkey),
Invalid::BadSignature
);
if verify_signatures.is_true() {
verify!(
exit_signature_set(state, exit, spec)?.is_valid(),
ExitInvalid::BadSignature
);
}
Ok(())
}

View File

@ -1,7 +1,14 @@
use super::errors::{ProposerSlashingInvalid as Invalid, ProposerSlashingValidationError as Error};
use tree_hash::SignedRoot;
use super::errors::{BlockOperationError, ProposerSlashingInvalid as Invalid};
use super::signature_sets::proposer_slashing_signature_set;
use crate::VerifySignatures;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
fn error(reason: Invalid) -> BlockOperationError<Invalid> {
BlockOperationError::invalid(reason)
}
/// Indicates if a `ProposerSlashing` is valid to be included in a block in the current epoch of the given
/// state.
///
@ -11,14 +18,13 @@ use types::*;
pub fn verify_proposer_slashing<T: EthSpec>(
proposer_slashing: &ProposerSlashing,
state: &BeaconState<T>,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<()> {
let proposer = state
.validators
.get(proposer_slashing.proposer_index as usize)
.ok_or_else(|| {
Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index))
})?;
.ok_or_else(|| error(Invalid::ProposerUnknown(proposer_slashing.proposer_index)))?;
// Verify that the epoch is the same
verify!(
@ -42,44 +48,12 @@ pub fn verify_proposer_slashing<T: EthSpec>(
Invalid::ProposerNotSlashable(proposer_slashing.proposer_index)
);
verify!(
verify_header_signature::<T>(
&proposer_slashing.header_1,
&proposer.pubkey,
&state.fork,
spec
),
Invalid::BadProposal1Signature
);
verify!(
verify_header_signature::<T>(
&proposer_slashing.header_2,
&proposer.pubkey,
&state.fork,
spec
),
Invalid::BadProposal2Signature
);
if verify_signatures.is_true() {
let (signature_set_1, signature_set_2) =
proposer_slashing_signature_set(state, proposer_slashing, spec)?;
verify!(signature_set_1.is_valid(), Invalid::BadProposal1Signature);
verify!(signature_set_2.is_valid(), Invalid::BadProposal2Signature);
}
Ok(())
}
/// Verifies the signature of a proposal.
///
/// Returns `true` if the signature is valid.
///
/// Spec v0.8.0
fn verify_header_signature<T: EthSpec>(
header: &BeaconBlockHeader,
pubkey: &PublicKey,
fork: &Fork,
spec: &ChainSpec,
) -> bool {
let message = header.signed_root();
let domain = spec.get_domain(
header.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer,
fork,
);
header.signature.verify(&message[..], domain, pubkey)
}

View File

@ -1,8 +1,15 @@
use super::errors::{TransferInvalid as Invalid, TransferValidationError as Error};
use super::errors::{BlockOperationError, TransferInvalid as Invalid};
use crate::per_block_processing::signature_sets::transfer_signature_set;
use crate::per_block_processing::VerifySignatures;
use bls::get_withdrawal_credentials;
use tree_hash::SignedRoot;
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
fn error(reason: Invalid) -> BlockOperationError<Invalid> {
BlockOperationError::invalid(reason)
}
/// Indicates if a `Transfer` is valid to be included in a block in the current epoch of the given
/// state.
///
@ -12,9 +19,10 @@ use types::*;
pub fn verify_transfer<T: EthSpec>(
state: &BeaconState<T>,
transfer: &Transfer,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_transfer_parametric(state, transfer, spec, false)
) -> Result<()> {
verify_transfer_parametric(state, transfer, verify_signatures, spec, false)
}
/// Like `verify_transfer` but doesn't run checks which may become true in future states.
@ -23,9 +31,10 @@ pub fn verify_transfer<T: EthSpec>(
pub fn verify_transfer_time_independent_only<T: EthSpec>(
state: &BeaconState<T>,
transfer: &Transfer,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), Error> {
verify_transfer_parametric(state, transfer, spec, true)
) -> Result<()> {
verify_transfer_parametric(state, transfer, verify_signatures, spec, true)
}
/// Parametric version of `verify_transfer` that allows some checks to be skipped.
@ -41,24 +50,25 @@ pub fn verify_transfer_time_independent_only<T: EthSpec>(
fn verify_transfer_parametric<T: EthSpec>(
state: &BeaconState<T>,
transfer: &Transfer,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
time_independent_only: bool,
) -> Result<(), Error> {
) -> Result<()> {
let sender_balance = *state
.balances
.get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
.ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?;
let recipient_balance = *state
.balances
.get(transfer.recipient as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.recipient)))?;
.ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.recipient)))?;
// Safely determine `amount + fee`.
let total_amount = transfer
.amount
.checked_add(transfer.fee)
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
.ok_or_else(|| error(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
// Verify the sender has adequate balance.
verify!(
@ -99,7 +109,7 @@ fn verify_transfer_parametric<T: EthSpec>(
let sender_validator = state
.validators
.get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
.ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?;
// Ensure one of the following is met:
//
@ -131,19 +141,12 @@ fn verify_transfer_parametric<T: EthSpec>(
)
);
// Verify the transfer signature.
let message = transfer.signed_root();
let domain = spec.get_domain(
transfer.slot.epoch(T::slots_per_epoch()),
Domain::Transfer,
&state.fork,
);
verify!(
transfer
.signature
.verify(&message[..], domain, &transfer.pubkey),
Invalid::BadSignature
);
if verify_signatures.is_true() {
verify!(
transfer_signature_set(state, transfer, spec)?.is_valid(),
Invalid::BadSignature
);
}
Ok(())
}
@ -157,15 +160,15 @@ pub fn execute_transfer<T: EthSpec>(
state: &mut BeaconState<T>,
transfer: &Transfer,
spec: &ChainSpec,
) -> Result<(), Error> {
) -> Result<()> {
let sender_balance = *state
.balances
.get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
.ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?;
let recipient_balance = *state
.balances
.get(transfer.recipient as usize)
.ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.recipient)))?;
.ok_or_else(|| error(Invalid::ToValidatorUnknown(transfer.recipient)))?;
let proposer_index =
state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?;
@ -174,11 +177,11 @@ pub fn execute_transfer<T: EthSpec>(
let total_amount = transfer
.amount
.checked_add(transfer.fee)
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
.ok_or_else(|| error(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
state.balances[transfer.sender as usize] =
sender_balance.checked_sub(total_amount).ok_or_else(|| {
Error::Invalid(Invalid::FromBalanceInsufficient(
error(Invalid::FromBalanceInsufficient(
total_amount,
sender_balance,
))
@ -187,7 +190,7 @@ pub fn execute_transfer<T: EthSpec>(
state.balances[transfer.recipient as usize] = recipient_balance
.checked_add(transfer.amount)
.ok_or_else(|| {
Error::Invalid(Invalid::ToBalanceOverflow(
error(Invalid::ToBalanceOverflow(
recipient_balance,
transfer.amount,
))
@ -195,7 +198,7 @@ pub fn execute_transfer<T: EthSpec>(
state.balances[proposer_index] =
proposer_balance.checked_add(transfer.fee).ok_or_else(|| {
Error::Invalid(Invalid::ProposerBalanceOverflow(
error(Invalid::ProposerBalanceOverflow(
proposer_balance,
transfer.fee,
))

View File

@ -0,0 +1,182 @@
use log::info;
use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder};
use types::{EthSpec, *};
pub struct BlockBuilder<T: EthSpec> {
pub state_builder: TestingBeaconStateBuilder<T>,
pub block_builder: TestingBeaconBlockBuilder<T>,
pub num_validators: usize,
pub num_proposer_slashings: usize,
pub num_attester_slashings: usize,
pub num_attestations: usize,
pub num_deposits: usize,
pub num_exits: usize,
pub num_transfers: usize,
}
impl<T: EthSpec> BlockBuilder<T> {
pub fn new(num_validators: usize, spec: &ChainSpec) -> Self {
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec);
let block_builder = TestingBeaconBlockBuilder::new(spec);
Self {
state_builder,
block_builder,
num_validators: 0,
num_proposer_slashings: 0,
num_attester_slashings: 0,
num_attestations: 0,
num_deposits: 0,
num_exits: 0,
num_transfers: 0,
}
}
pub fn maximize_block_operations(&mut self) {
self.num_proposer_slashings = T::MaxProposerSlashings::to_usize();
self.num_attester_slashings = T::MaxAttesterSlashings::to_usize();
self.num_attestations = T::MaxAttestations::to_usize();
self.num_deposits = T::MaxDeposits::to_usize();
self.num_exits = T::MaxVoluntaryExits::to_usize();
self.num_transfers = T::MaxTransfers::to_usize();
}
pub fn set_slot(&mut self, slot: Slot) {
self.state_builder.teleport_to_slot(slot);
}
pub fn build_caches(&mut self, spec: &ChainSpec) {
// Builds all caches; benches will not contain shuffling/committee building times.
self.state_builder.build_caches(&spec).unwrap();
}
pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock<T>, BeaconState<T>) {
let (mut state, keypairs) = self.state_builder.build();
let builder = &mut self.block_builder;
builder.set_slot(state.slot);
let proposer_index = state
.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)
.unwrap();
let proposer_keypair = &keypairs[proposer_index];
builder.set_randao_reveal(&proposer_keypair.sk, &state.fork, spec);
let parent_root = state.latest_block_header.canonical_root();
builder.set_parent_root(parent_root);
// Used as a stream of validator indices for use in slashings, exits, etc.
let mut validators_iter = (0..keypairs.len() as u64).into_iter();
// Insert `ProposerSlashing` objects.
for _ in 0..self.num_proposer_slashings {
let validator_index = validators_iter.next().expect("Insufficient validators.");
builder.insert_proposer_slashing(
validator_index,
&keypairs[validator_index as usize].sk,
&state.fork,
spec,
);
}
info!(
"Inserted {} proposer slashings.",
builder.block.body.proposer_slashings.len()
);
// Insert `AttesterSlashing` objects
for _ in 0..self.num_attester_slashings {
let mut attesters: Vec<u64> = vec![];
let mut secret_keys: Vec<&SecretKey> = vec![];
const NUM_SLASHED_INDICES: usize = 12;
for _ in 0..NUM_SLASHED_INDICES {
let validator_index = validators_iter.next().expect("Insufficient validators.");
attesters.push(validator_index);
secret_keys.push(&keypairs[validator_index as usize].sk);
}
builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec);
}
info!(
"Inserted {} attester slashings.",
builder.block.body.attester_slashings.len()
);
// Insert `Attestation` objects.
let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect();
builder
.insert_attestations(
&state,
&all_secret_keys,
self.num_attestations as usize,
spec,
)
.unwrap();
info!(
"Inserted {} attestations.",
builder.block.body.attestations.len()
);
// Insert `Deposit` objects.
for i in 0..self.num_deposits {
builder.insert_deposit(
32_000_000_000,
state.eth1_data.deposit_count + (i as u64),
&state,
spec,
);
}
state.eth1_data.deposit_count += self.num_deposits as u64;
info!("Inserted {} deposits.", builder.block.body.deposits.len());
// Insert the requested number of `Exit` objects.
for _ in 0..self.num_exits {
let validator_index = validators_iter.next().expect("Insufficient validators.");
builder.insert_exit(
&state,
validator_index,
&keypairs[validator_index as usize].sk,
spec,
);
}
info!(
"Inserted {} exits.",
builder.block.body.voluntary_exits.len()
);
// Insert the requested number of `Transfer` objects.
for _ in 0..self.num_transfers {
let validator_index = validators_iter.next().expect("Insufficient validators.");
// Manually set the validator to be withdrawn.
state.validators[validator_index as usize].withdrawable_epoch = state.previous_epoch();
builder.insert_transfer(
&state,
validator_index,
validator_index,
1,
keypairs[validator_index as usize].clone(),
spec,
);
}
info!("Inserted {} transfers.", builder.block.body.transfers.len());
// Set the eth1 data to be different from the state.
self.block_builder.block.body.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]);
let block = self
.block_builder
.build(&proposer_keypair.sk, &state.fork, spec);
(block, state)
}
}
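A small usage sketch (assumed test-only context) of driving the builder end-to-end; the validator count and slot below are illustrative:
let spec = MinimalEthSpec::default_spec();
let mut builder: BlockBuilder<MinimalEthSpec> = BlockBuilder::new(64, &spec);
// Move past the early epochs so operations referencing prior epochs are constructible.
builder.set_slot(Slot::from(MinimalEthSpec::slots_per_epoch() * 3 - 2));
builder.build_caches(&spec);
builder.num_exits = 1;
let (_block, _state) = builder.build(&spec);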

View File

@ -0,0 +1,208 @@
use state_processing::{
per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy,
};
use types::{
AggregateSignature, BeaconBlock, BeaconState, ChainSpec, EthSpec, Keypair, MinimalEthSpec,
Signature, Slot,
};
const VALIDATOR_COUNT: usize = 64;
fn get_block<T, F>(mut mutate_builder: F) -> (BeaconBlock<T>, BeaconState<T>)
where
T: EthSpec,
F: FnMut(&mut BlockBuilder<T>),
{
let spec = T::default_spec();
let mut builder: BlockBuilder<T> = BlockBuilder::new(VALIDATOR_COUNT, &spec);
builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2));
builder.build_caches(&spec);
mutate_builder(&mut builder);
builder.build(&spec)
}
fn test_scenario<T: EthSpec, F, G>(mutate_builder: F, mut invalidate_block: G, spec: &ChainSpec)
where
T: EthSpec,
F: FnMut(&mut BlockBuilder<T>),
G: FnMut(&mut BeaconBlock<T>),
{
let (mut block, state) = get_block::<T, _>(mutate_builder);
/*
* Control check to ensure the valid block should pass verification.
*/
assert_eq!(
per_block_processing(
&mut state.clone(),
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
spec
),
Ok(()),
"valid block should pass with verify individual"
);
assert_eq!(
per_block_processing(
&mut state.clone(),
&block,
None,
BlockSignatureStrategy::VerifyBulk,
spec
),
Ok(()),
"valid block should pass with verify bulk"
);
invalidate_block(&mut block);
/*
* Check to ensure the invalid block fails.
*/
assert!(
per_block_processing(
&mut state.clone(),
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
spec
)
.is_err(),
"invalid block should fail with verify individual"
);
assert_eq!(
per_block_processing(
&mut state.clone(),
&block,
None,
BlockSignatureStrategy::VerifyBulk,
spec
),
Err(BlockProcessingError::BulkSignatureVerificationFailed),
"invalid block should fail with verify bulk"
);
}
// TODO: use lazy static
fn agg_sig() -> AggregateSignature {
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&sig());
agg_sig
}
// TODO: use lazy static
fn sig() -> Signature {
let keypair = Keypair::random();
Signature::new(&[42, 42], 12, &keypair.sk)
}
type TestEthSpec = MinimalEthSpec;
mod signatures_minimal {
use super::*;
#[test]
fn block_proposal() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(|_| {}, |block| block.signature = sig(), spec);
}
#[test]
fn randao() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(|_| {}, |block| block.body.randao_reveal = sig(), spec);
}
#[test]
fn proposer_slashing() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_proposer_slashings = 1;
},
|block| block.body.proposer_slashings[0].header_1.signature = sig(),
spec,
);
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_proposer_slashings = 1;
},
|block| block.body.proposer_slashings[0].header_2.signature = sig(),
spec,
);
}
#[test]
fn attester_slashing() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_attester_slashings = 1;
},
|block| block.body.attester_slashings[0].attestation_1.signature = agg_sig(),
spec,
);
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_attester_slashings = 1;
},
|block| block.body.attester_slashings[0].attestation_2.signature = agg_sig(),
spec,
);
}
#[test]
fn attestation() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_attestations = 1;
},
|block| block.body.attestations[0].signature = agg_sig(),
spec,
);
}
#[test]
// TODO: fix this failure by generating valid merkle proofs.
#[should_panic]
fn deposit() {
let spec = &TestEthSpec::default_spec();
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_deposits = 1;
},
|block| block.body.deposits[0].data.signature = sig().into(),
spec,
);
}
#[test]
fn exit() {
let mut spec = &mut TestEthSpec::default_spec();
// Allows the test to pass.
spec.persistent_committee_period = 0;
test_scenario::<TestEthSpec, _, _>(
|mut builder| {
builder.num_exits = 1;
},
|block| block.body.voluntary_exits[0].signature = sig(),
spec,
);
}
// Cannot test transfers because their maximum length is zero.
}

View File

@ -398,6 +398,10 @@ impl<T: EthSpec> BeaconState<T> {
.ok_or_else(|| Error::SlotOutOfBounds)?;
let seed = self.get_seed(epoch, spec)?;
if first_committee.is_empty() {
return Err(Error::InsufficientValidators);
}
let mut i = 0;
Ok(loop {
let candidate_index = first_committee[(epoch.as_usize() + i) % first_committee.len()];

View File

@ -119,6 +119,10 @@ impl<T: EthSpec> TestingBeaconBlockBuilder<T> {
// - The shard of the committee.
let mut committees: Vec<(Slot, Vec<usize>, Vec<usize>, u64)> = vec![];
if slot < T::slots_per_epoch() {
panic!("slot is too low, will get stuck in loop")
}
// Loop backwards through slots gathering each committee, until:
//
// - The slot is too old to be included in a block at this slot.

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.9.0" }
milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" }
eth2_hashing = { path = "../eth2_hashing" }
hex = "0.3"
rand = "^0.5"

View File

@ -1,5 +1,5 @@
use super::PublicKey;
use milagro_bls::AggregatePublicKey as RawAggregatePublicKey;
use milagro_bls::{AggregatePublicKey as RawAggregatePublicKey, G1Point};
/// A BLS aggregate public key.
///
@ -13,15 +13,31 @@ impl AggregatePublicKey {
AggregatePublicKey(RawAggregatePublicKey::new())
}
pub fn add_without_affine(&mut self, public_key: &PublicKey) {
self.0.point.add(&public_key.as_raw().point)
}
pub fn affine(&mut self) {
self.0.point.affine()
}
pub fn add(&mut self, public_key: &PublicKey) {
self.0.add(public_key.as_raw())
}
pub fn add_point(&mut self, point: &G1Point) {
self.0.point.add(point)
}
/// Returns the underlying public key.
pub fn as_raw(&self) -> &RawAggregatePublicKey {
&self.0
}
pub fn into_raw(self) -> RawAggregatePublicKey {
self.0
}
/// Return a hex string representation of this key's bytes.
#[cfg(test)]
pub fn as_hex_string(&self) -> String {

View File

@ -1,6 +1,7 @@
use super::*;
use milagro_bls::{
AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature,
G2Point,
};
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
@ -76,13 +77,13 @@ impl AggregateSignature {
aggregate_public_keys.iter().map(|pk| pk.as_raw()).collect();
// Messages are concatenated into one long message.
let mut msg: Vec<u8> = vec![];
let mut msgs: Vec<Vec<u8>> = vec![];
for message in messages {
msg.extend_from_slice(message);
msgs.push(message.to_vec());
}
self.aggregate_signature
.verify_multiple(&msg[..], domain, &aggregate_public_keys[..])
.verify_multiple(&msgs, domain, &aggregate_public_keys[..])
}
/// Return AggregateSignature as bytes
@ -112,6 +113,19 @@ impl AggregateSignature {
Ok(Self::empty_signature())
}
/// Returns the underlying signature.
pub fn as_raw(&self) -> &RawAggregateSignature {
&self.aggregate_signature
}
/// Creates an `AggregateSignature` from a `G2Point`.
pub fn from_point(point: G2Point) -> Self {
Self {
aggregate_signature: RawAggregateSignature { point },
is_empty: false,
}
}
/// Returns if the AggregateSignature `is_empty`
pub fn is_empty(&self) -> bool {
self.is_empty

View File

@ -1,4 +1,5 @@
use super::{PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE};
use milagro_bls::G1Point;
/// A BLS aggregate public key.
///
@ -7,6 +8,8 @@ use super::{PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE};
#[derive(Debug, Clone, Default)]
pub struct FakeAggregatePublicKey {
bytes: Vec<u8>,
/// Never used; exists only for compatibility with the "real" `AggregatePublicKey`.
pub point: G1Point,
}
impl FakeAggregatePublicKey {
@ -14,10 +17,19 @@ impl FakeAggregatePublicKey {
Self::zero()
}
pub fn add_without_affine(&mut self, _public_key: &PublicKey) {
// Do nothing.
}
pub fn affine(&mut self) {
// Do nothing.
}
/// Creates a new all-zero's aggregate public key
pub fn zero() -> Self {
Self {
bytes: vec![0; BLS_PUBLIC_KEY_BYTE_SIZE],
point: G1Point::new(),
}
}
@ -25,10 +37,18 @@ impl FakeAggregatePublicKey {
// Do nothing.
}
pub fn as_raw(&self) -> &FakeAggregatePublicKey {
pub fn add_point(&mut self, _point: &G1Point) {
// Do nothing.
}
pub fn as_raw(&self) -> &Self {
&self
}
pub fn into_raw(self) -> Self {
self
}
pub fn as_bytes(&self) -> Vec<u8> {
self.bytes.clone()
}

View File

@ -2,6 +2,7 @@ use super::{
fake_aggregate_public_key::FakeAggregatePublicKey, fake_signature::FakeSignature,
BLS_AGG_SIG_BYTE_SIZE,
};
use milagro_bls::G2Point;
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
@ -14,6 +15,8 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode};
#[derive(Debug, PartialEq, Clone, Default, Eq)]
pub struct FakeAggregateSignature {
bytes: Vec<u8>,
/// Never used; exists only for compatibility with the "real" `AggregateSignature`.
pub point: G2Point,
}
impl FakeAggregateSignature {
@ -26,9 +29,14 @@ impl FakeAggregateSignature {
pub fn zero() -> Self {
Self {
bytes: vec![0; BLS_AGG_SIG_BYTE_SIZE],
point: G2Point::new(),
}
}
pub fn as_raw(&self) -> &Self {
&self
}
/// Does glorious nothing.
pub fn add(&mut self, _signature: &FakeSignature) {
// Do nothing.
@ -69,6 +77,7 @@ impl FakeAggregateSignature {
} else {
Ok(Self {
bytes: bytes.to_vec(),
point: G2Point::new(),
})
}
}

View File

@ -1,4 +1,5 @@
use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE};
use milagro_bls::G1Point;
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::{encode as hex_encode, HexVisitor};
@ -14,6 +15,8 @@ use std::hash::{Hash, Hasher};
#[derive(Debug, Clone, Eq)]
pub struct FakePublicKey {
bytes: Vec<u8>,
/// Never used; exists only for compatibility with the "real" `PublicKey`.
pub point: G1Point,
}
impl FakePublicKey {
@ -25,6 +28,7 @@ impl FakePublicKey {
pub fn zero() -> Self {
Self {
bytes: vec![0; BLS_PUBLIC_KEY_BYTE_SIZE],
point: G1Point::new(),
}
}
@ -39,6 +43,7 @@ impl FakePublicKey {
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
Ok(Self {
bytes: bytes.to_vec(),
point: G1Point::new(),
})
}
@ -61,6 +66,13 @@ impl FakePublicKey {
hex_encode(end_bytes)
}
/// Returns the point as a hex string of the SSZ encoding.
///
/// Note: the string is prefixed with `0x`.
pub fn as_hex_string(&self) -> String {
hex_encode(self.as_ssz_bytes())
}
// Returns itself
pub fn as_raw(&self) -> &Self {
self

View File

@ -1,5 +1,6 @@
use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE};
use hex::encode as hex_encode;
use milagro_bls::G2Point;
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::HexVisitor;
@ -13,6 +14,8 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode};
pub struct FakeSignature {
bytes: Vec<u8>,
is_empty: bool,
/// Never used; exists only for compatibility with the "real" `Signature`.
pub point: G2Point,
}
impl FakeSignature {
@ -26,6 +29,7 @@ impl FakeSignature {
Self {
bytes: vec![0; BLS_SIG_BYTE_SIZE],
is_empty: true,
point: G2Point::new(),
}
}
@ -39,6 +43,10 @@ impl FakeSignature {
true
}
pub fn as_raw(&self) -> &Self {
&self
}
/// _Always_ returns true.
pub fn verify_hashed(
&self,
@ -61,6 +69,7 @@ impl FakeSignature {
Ok(Self {
bytes: bytes.to_vec(),
is_empty,
point: G2Point::new(),
})
}
}

View File

@ -7,12 +7,14 @@ mod keypair;
mod public_key_bytes;
mod secret_key;
mod signature_bytes;
mod signature_set;
pub use crate::keypair::Keypair;
pub use crate::public_key_bytes::PublicKeyBytes;
pub use crate::secret_key::SecretKey;
pub use crate::signature_bytes::SignatureBytes;
pub use milagro_bls::{compress_g2, hash_on_g2};
pub use milagro_bls::{compress_g2, hash_on_g2, G1Point};
pub use signature_set::{verify_signature_sets, SignatureSet, SignedMessage};
#[cfg(feature = "fake_crypto")]
mod fake_aggregate_public_key;

View File

@ -28,7 +28,7 @@ impl PublicKey {
/// Returns the underlying point as compressed bytes.
///
/// Identical to `self.as_uncompressed_bytes()`.
fn as_bytes(&self) -> Vec<u8> {
pub fn as_bytes(&self) -> Vec<u8> {
self.as_raw().as_bytes()
}

View File

@ -0,0 +1,193 @@
use crate::{AggregatePublicKey, AggregateSignature, PublicKey, Signature};
use milagro_bls::{G1Point, G2Point};
#[cfg(not(feature = "fake_crypto"))]
use milagro_bls::AggregateSignature as RawAggregateSignature;
type Message = Vec<u8>;
type Domain = u64;
#[derive(Clone)]
pub struct SignedMessage<'a> {
signing_keys: Vec<&'a G1Point>,
message: Message,
}
impl<'a> SignedMessage<'a> {
pub fn new<T>(signing_keys: Vec<&'a T>, message: Message) -> Self
where
T: G1Ref,
{
Self {
signing_keys: signing_keys.iter().map(|k| k.g1_ref()).collect(),
message,
}
}
}
#[derive(Clone)]
pub struct SignatureSet<'a> {
pub signature: &'a G2Point,
signed_messages: Vec<SignedMessage<'a>>,
domain: Domain,
}
impl<'a> SignatureSet<'a> {
pub fn single<S, T>(
signature: &'a S,
signing_key: &'a T,
message: Message,
domain: Domain,
) -> Self
where
T: G1Ref,
S: G2Ref,
{
Self {
signature: signature.g2_ref(),
signed_messages: vec![SignedMessage::new(vec![signing_key], message)],
domain,
}
}
pub fn dual<S, T>(
signature: &'a S,
message_0: Message,
message_0_signing_keys: Vec<&'a T>,
message_1: Message,
message_1_signing_keys: Vec<&'a T>,
domain: Domain,
) -> Self
where
T: G1Ref,
S: G2Ref,
{
Self {
signature: signature.g2_ref(),
signed_messages: vec![
SignedMessage::new(message_0_signing_keys, message_0),
SignedMessage::new(message_1_signing_keys, message_1),
],
domain,
}
}
pub fn new<S>(signature: &'a S, signed_messages: Vec<SignedMessage<'a>>, domain: Domain) -> Self
where
S: G2Ref,
{
Self {
signature: signature.g2_ref(),
signed_messages,
domain,
}
}
pub fn is_valid(&self) -> bool {
let sig = milagro_bls::AggregateSignature {
point: self.signature.clone(),
};
let mut messages: Vec<Vec<u8>> = vec![];
let mut pubkeys = vec![];
self.signed_messages.iter().for_each(|signed_message| {
messages.push(signed_message.message.clone());
let point = if signed_message.signing_keys.len() == 1 {
signed_message.signing_keys[0].clone()
} else {
aggregate_public_keys(&signed_message.signing_keys)
};
pubkeys.push(milagro_bls::AggregatePublicKey { point });
});
let pubkey_refs: Vec<&milagro_bls::AggregatePublicKey> =
pubkeys.iter().map(std::borrow::Borrow::borrow).collect();
sig.verify_multiple(&messages, self.domain, &pubkey_refs)
}
}
#[cfg(not(feature = "fake_crypto"))]
pub fn verify_signature_sets<'a>(iter: impl Iterator<Item = SignatureSet<'a>>) -> bool {
let rng = &mut rand::thread_rng();
RawAggregateSignature::verify_multiple_signatures(rng, iter.map(Into::into))
}
#[cfg(feature = "fake_crypto")]
pub fn verify_signature_sets<'a>(_iter: impl Iterator<Item = SignatureSet<'a>>) -> bool {
true
}
type VerifySet<'a> = (G2Point, Vec<G1Point>, Vec<Vec<u8>>, u64);
impl<'a> Into<VerifySet<'a>> for SignatureSet<'a> {
fn into(self) -> VerifySet<'a> {
let signature = self.signature.clone();
let (pubkeys, messages): (Vec<G1Point>, Vec<Message>) = self
.signed_messages
.into_iter()
.map(|signed_message| {
let key = if signed_message.signing_keys.len() == 1 {
signed_message.signing_keys[0].clone()
} else {
aggregate_public_keys(&signed_message.signing_keys)
};
(key, signed_message.message)
})
.unzip();
(signature, pubkeys, messages, self.domain)
}
}
/// Aggregate a slice of public key points into a single `G1Point`.
fn aggregate_public_keys<'a>(public_keys: &'a [&'a G1Point]) -> G1Point {
let mut aggregate =
public_keys
.iter()
.fold(AggregatePublicKey::new(), |mut aggregate, &pubkey| {
aggregate.add_point(pubkey);
aggregate
});
aggregate.affine();
aggregate.into_raw().point
}
pub trait G1Ref {
fn g1_ref(&self) -> &G1Point;
}
impl G1Ref for AggregatePublicKey {
fn g1_ref(&self) -> &G1Point {
&self.as_raw().point
}
}
impl G1Ref for PublicKey {
fn g1_ref(&self) -> &G1Point {
&self.as_raw().point
}
}
pub trait G2Ref {
fn g2_ref(&self) -> &G2Point;
}
impl G2Ref for AggregateSignature {
fn g2_ref(&self) -> &G2Point {
&self.as_raw().point
}
}
impl G2Ref for Signature {
fn g2_ref(&self) -> &G2Point {
&self.as_raw().point
}
}
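To illustrate how this new module fits together, here is a minimal, non-authoritative usage sketch. It assumes the crate is imported as `bls`, that `Keypair::random()`, `keypair.pk`/`keypair.sk` and `Signature::new(message, domain, &sk)` behave as in the tests earlier in this diff, and that the message and domain values are arbitrary placeholders:

```rust
use bls::{verify_signature_sets, Keypair, Signature, SignatureSet};

fn main() {
    // Placeholder message and domain; in practice the message is a 32-byte
    // signing root and the domain comes from the chain spec.
    let message: Vec<u8> = vec![42; 32];
    let domain: u64 = 0;

    let keypair = Keypair::random();
    let signature = Signature::new(&message, domain, &keypair.sk);

    // Pair the signature with the public key and message it should verify against.
    let set = SignatureSet::single(&signature, &keypair.pk, message.clone(), domain);

    // Verify a single set directly...
    assert!(set.is_valid());

    // ...or hand many sets to the batched verifier.
    assert!(verify_signature_sets(vec![set].into_iter()));
}
```

The `dual` and `new` constructors follow the same pattern for operations whose single signature covers several messages or several aggregated keys, such as slashings and attestations.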

View File

@ -1,543 +1,3 @@
# simpleserialize (ssz) [WIP]
# simpleserialize (ssz)
This is currently a ***Work In Progress*** crate.
SimpleSerialize is a serialization protocol described by Vitalik Buterin. The
method is tentatively intended for use in the Ethereum Beacon Chain as
described in the [Ethereum 2.1 Spec](https://notes.ethereum.org/s/Syj3QZSxm).
The Beacon Chain specification is the core, canonical specification which we
are following.
The current reference implementation has been described in the [Beacon Chain
Repository](https://github.com/ethereum/beacon_chain/blob/master/ssz/ssz.py).
*Please Note: This implementation is presently a placeholder until the final
spec is decided.*\
*Do not rely upon it for reference.*
## Table of Contents
* [SimpleSerialize Overview](#simpleserialize-overview)
+ [Serialize/Encode](#serializeencode)
- [int or uint: 8/16/24/32/64/256](#int-or-uint-816243264256)
- [Address](#address)
- [Hash32](#hash32)
- [Bytes](#bytes)
- [List](#list)
+ [Deserialize/Decode](#deserializedecode)
- [Int or Uint: 8/16/24/32/64/256](#int-or-uint-816243264256)
- [Address](#address-1)
- [Hash32](#hash32-1)
- [Bytes](#bytes-1)
- [List](#list-1)
* [Technical Overview](#technical-overview)
* [Building](#building)
+ [Installing Rust](#installing-rust)
* [Dependencies](#dependencies)
+ [bytes v0.4.9](#bytes-v049)
+ [ethereum-types](#ethereum-types)
* [Interface](#interface)
+ [Encode](#encodable)
+ [Decode](#decodable)
+ [SszStream](#sszstream)
- [new()](#new)
- [append(&mut self, value: &E) -> &mut Self](#appendmut-self-value-e---mut-self)
- [append_encoded_val(&mut self, vec: &Vec)](#append_encoded_valmut-self-vec-vec)
- [append_vec(&mut self, vec: &Vec)](#append_vecmut-self-vec-vec)
- [drain(self) -> Vec](#drainself---vec)
+ [decode_ssz(ssz_bytes: &(u8), index: usize) -> Result](#decode_sszssz_bytes-u8-index-usize---resultt-usize-decodeerror)
+ [decode_ssz_list(ssz_bytes: &(u8), index: usize) -> Result, usize), DecodeError>](#decode_ssz_listssz_bytes-u8-index-usize---resultvec-usize-decodeerror)
+ [decode_length(bytes: &(u8), index: usize, length_bytes: usize) -> Result](#decode_lengthbytes-u8-index-usize-length_bytes-usize---resultusize-decodeerror)
* [Usage](#usage)
+ [Serializing/Encoding](#serializingencoding)
- [Rust](#rust)
* [Deserializing/Decoding](#deserializingdecoding)
- [Rust](#rust-1)
---
## SimpleSerialize Overview
The ``simpleserialize`` method for serialization follows simple byte conversion,
making it effective and efficient for encoding and decoding.
The decoding requires knowledge of the data **type** and the order of the
serialization.
Syntax:
| Shorthand | Meaning |
|:-------------|:----------------------------------------------------|
| `little` | ``little endian`` |
| `to_bytes` | convert to bytes. Params: ``(size, byte order)`` |
| `from_bytes` | convert from bytes. Params: ``(bytes, byte order)`` |
| `value` | the value to serialize |
| `rawbytes` | raw encoded/serialized bytes |
| `len(value)` | get the length of the value. (number of bytes etc) |
### Serialize/Encode
#### int or uint: 8/16/24/32/64/256
Convert directly to bytes the size of the int. (e.g. ``int16 = 2 bytes``)
All integers are serialized as **little endian**.
| Check to perform | Code |
|:-----------------------|:------------------------|
| Int size is not 0 | ``int_size > 0`` |
| Size is a byte integer | ``int_size % 8 == 0`` |
| Value is less than max | ``2**int_size > value`` |
```python
buffer_size = int_size / 8
return value.to_bytes(buffer_size, 'little')
```
#### Address
The address should already come as a hash/byte format. Ensure that length is
**20**.
| Check to perform | Code |
|:-----------------------|:---------------------|
| Length is correct (20) | ``len(value) == 20`` |
```python
assert( len(value) == 20 )
return value
```
#### Hash32
The hash32 should already be a 32 byte length serialized data format. The safety
check ensures the 32 byte length is satisfied.
| Check to perform | Code |
|:-----------------------|:---------------------|
| Length is correct (32) | ``len(value) == 32`` |
```python
assert( len(value) == 32 )
return value
```
#### Bytes
For general `byte` type:
1. Get the length/number of bytes; Encode into a 4 byte integer.
2. Append the value to the length and return: ``[ length_bytes ] + [
value_bytes ]``
```python
byte_length = (len(value)).to_bytes(4, 'little')
return byte_length + value
```
#### List
For lists of values, get the length of the list and then serialize the value
of each item in the list:
1. For each item in list:
1. serialize.
2. append to string.
2. Get size of serialized string. Encode into a 4 byte integer.
```python
serialized_list_string = ''
for item in value:
serialized_list_string += serialize(item)
serialized_len = len(serialized_list_string)
return serialized_len + serialized_list_string
```
### Deserialize/Decode
The decoding requires knowledge of the type of the item to be decoded. When
performing decoding on an entire serialized string, it also requires knowledge
of what order the objects have been serialized in.
Note: Each return will provide ``deserialized_object, new_index`` keeping track
of the new index.
At each step, the following checks should be made:
| Check Type | Check |
|:-------------------------|:----------------------------------------------------------|
| Ensure sufficient length | ``length(rawbytes) > current_index + deserialize_length`` |
#### Int or Uint: 8/16/24/32/64/256
Convert directly from bytes into integer utilising the number of bytes the same
size as the integer length. (e.g. ``int16 == 2 bytes``)
All integers are interpreted as **little endian**.
```python
byte_length = int_size / 8
new_index = current_index + int_size
return int.from_bytes(rawbytes[current_index:current_index+int_size], 'little'), new_index
```
#### Address
Return the 20 bytes.
```python
new_index = current_index + 20
return rawbytes[current_index:current_index+20], new_index
```
#### Hash32
Return the 32 bytes.
```python
new_index = current_index + 32
return rawbytes[current_index:current_index+32], new_index
```
#### Bytes
Get the length of the bytes, return the bytes.
```python
bytes_length = int.from_bytes(rawbytes[current_index:current_index+4], 'little')
new_index = current_index + 4 + bytes_length
return rawbytes[current_index+4:current_index+4+bytes_length], new_index
```
#### List
Deserialize each object in the list.
1. Get the length of the serialized list.
2. Loop through, deserializing each item in the list until you reach the
entire length of the list.
| Check type | code |
|:------------------------------------|:--------------------------------------|
| rawbytes has enough left for length | ``len(rawbytes) > current_index + 4`` |
```python
total_length = int.from_bytes(rawbytes[current_index:current_index+4], 'little')
new_index = current_index + 4 + total_length
item_index = current_index + 4
deserialized_list = []
while item_index < new_index:
object, item_index = deserialize(rawbytes, item_index, item_type)
deserialized_list.append(object)
return deserialized_list, new_index
```
## Technical Overview
The SimpleSerialize is a simple method for serializing objects for use in the
Ethereum beacon chain proposed by Vitalik Buterin. There are currently two
implementations denoting the functionality, the [Reference
Implementation](https://github.com/ethereum/beacon_chain/blob/master/ssz/ssz.py)
and the [Module](https://github.com/ethereum/research/tree/master/py_ssz) in
Ethereum research. It is being developed as a crate for the [**Rust programming
language**](https://www.rust-lang.org).
The crate will provide the functionality to serialize several types in
accordance with the spec and provide a serialized stream of bytes.
## Building
ssz currently builds on **rust v1.27.1**
### Installing Rust
The [**Rustup**](https://rustup.rs/) tool provides functionality to easily
manage rust on your local instance. It is a recommended method for installing
rust.
Installing on Linux or OSX:
```bash
curl https://sh.rustup.rs -sSf | sh
```
Installing on Windows:
* 32 Bit: [ https://win.rustup.rs/i686 ](https://win.rustup.rs/i686)
* 64 Bit: [ https://win.rustup.rs/x86_64 ](https://win.rustup.rs/x86_64)
## Dependencies
All dependencies are listed in the ``Cargo.toml`` file.
To build and install all related dependencies:
```bash
cargo build
```
### bytes v0.4.9
The `bytes` crate provides effective Byte Buffer implementations and
interfaces.
Documentation: [ https://docs.rs/bytes/0.4.9/bytes/ ](https://docs.rs/bytes/0.4.9/bytes/)
### ethereum-types
The `ethereum-types` provide primitives for types that are commonly used in the
ethereum protocol. This crate is provided by [Parity](https://www.parity.io/).
Github: [ https://github.com/paritytech/primitives ](https://github.com/paritytech/primitives)
---
## Interface
### Encode
A type is **Encode** if it has a valid ``ssz_append`` function. This is
used to ensure that the object/type can be serialized.
```rust
pub trait Encode {
fn ssz_append(&self, s: &mut SszStream);
}
```
### Decode
A type is **Decode** if it has a valid ``ssz_decode`` function. This is
used to ensure the object is deserializable.
```rust
pub trait Decode: Sized {
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError>;
}
```
### SszStream
The main implementation is the `SszStream` struct. The struct contains a
buffer of bytes, a Vector of `uint8`.
#### new()
Create a new, empty instance of the SszStream.
**Example**
```rust
let mut ssz = SszStream::new()
```
#### append<E>(&mut self, value: &E) -> &mut Self
Appends a value that can be encoded into the stream.
| Parameter | Description |
|:---------:|:-----------------------------------------|
| ``value`` | Encode value to append to the stream. |
**Example**
```rust
ssz.append(&x)
```
#### append_encoded_val(&mut self, vec: &Vec<u8>)
Appends some ssz encoded bytes to the stream.
| Parameter | Description |
|:---------:|:----------------------------------|
| ``vec`` | A vector of serialized ssz bytes. |
**Example**
```rust
let mut a = [0, 1];
ssz.append_encoded_val(&a.to_vec());
```
#### append_vec<E>(&mut self, vec: &Vec<E>)
Appends some vector (list) of encodable values to the stream.
| Parameter | Description |
|:---------:|:----------------------------------------------|
| ``vec`` | Vector of Encode objects to be serialized. |
**Example**
```rust
ssz.append_vec(attestations);
```
#### drain(self) -> Vec<u8>
Consumes the ssz stream and returns the buffer of bytes.
**Example**
```rust
ssz.drain()
```
### decode_ssz<T>(ssz_bytes: &[u8], index: usize) -> Result<(T, usize), DecodeError>
Decodes a single ssz serialized value of type `T`. Note: `T` must be decodable.
| Parameter | Description |
|:-------------:|:------------------------------------|
| ``ssz_bytes`` | Serialized list of bytes. |
| ``index`` | Starting index to deserialize from. |
**Returns**
| Return Value | Description |
|:-------------------:|:----------------------------------------------|
| ``Tuple(T, usize)`` | Returns the tuple of the type and next index. |
| ``DecodeError`` | Error if the decoding could not be performed. |
**Example**
```rust
let res: Result<(u16, usize), DecodeError> = decode_ssz(&encoded_ssz, 0);
```
### decode_ssz_list<T>(ssz_bytes: &[u8], index: usize) -> Result<(Vec<T>, usize), DecodeError>
Decodes a list of serialized values into a vector.
| Parameter | Description |
|:-------------:|:------------------------------------|
| ``ssz_bytes`` | Serialized list of bytes. |
| ``index`` | Starting index to deserialize from. |
**Returns**
| Return Value | Description |
|:------------------------:|:----------------------------------------------|
| ``Tuple(Vec<T>, usize)`` | Returns the tuple of the type and next index. |
| ``DecodeError`` | Error if the decoding could not be performed. |
**Example**
```rust
let decoded: Result<(Vec<usize>, usize), DecodeError> = decode_ssz_list( &encoded_ssz, 0);
```
### decode_length(bytes: &[u8], index: usize, length_bytes: usize) -> Result<usize, DecodeError>
Deserializes the "length" value in the serialized bytes from the index. The
length of bytes is given (usually 4 stated in the reference implementation) and
is often the value appended to the list in front of the actual serialized
object.
| Parameter | Description |
|:----------------:|:-------------------------------------------|
| ``bytes`` | Serialized list of bytes. |
| ``index`` | Starting index to deserialize from. |
| ``length_bytes`` | Number of bytes to deserialize into usize. |
**Returns**
| Return Value | Description |
|:---------------:|:-----------------------------------------------------------|
| ``usize`` | The length of the serialized object following this length. |
| ``DecodeError`` | Error if the decoding could not be performed. |
**Example**
```rust
let length_of_serialized: Result<usize, DecodeError> = decode_length(&encoded, 0, 4);
```
---
## Usage
### Serializing/Encoding
#### Rust
Create the `simpleserialize` stream that will produce the serialized objects.
```rust
let mut ssz = SszStream::new();
```
Encode the values that you need by using the ``append(..)`` method on the `SszStream`.
The **append** function is how the value gets serialized.
```rust
let x: u64 = 1 << 32;
ssz.append(&x);
```
To get the serialized byte vector use ``drain()`` on the `SszStream`.
```rust
ssz.drain()
```
**Example**
```rust
// 1 << 32 = 4294967296;
// As little-endian bytes it should equal: [0,0,0,0,1,0,0,0]
let x: u64 = 1 << 32;
// Create the new ssz stream
let mut ssz = SszStream::new();
// Serialize x
ssz.append(&x);
// Check that it is correct.
assert_eq!(ssz.drain(), vec![0,0,0,0,1,0,0,0]);
```
## Deserializing/Decoding
#### Rust
From the `simpleserialize` bytes, we are converting to the object.
```rust
let ssz = vec![0, 0, 8, 255, 255, 255, 255, 255, 255, 255, 255];
// Returns the result and the next index to decode.
let (result, index): (u64, usize) = decode_ssz(&ssz, 3).unwrap();
// Check for correctness
// 2**64-1 = 18446744073709551615
assert_eq!(result, 18446744073709551615);
// Index = 3 (initial index) + 8 (8 byte int) = 11
assert_eq!(index, 11);
```
Decoding a list of items:
```rust
// Encoded/Serialized list with junk numbers at the front
let serialized_list = vec![ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 32, 0, 0, 0,
0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0,
0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15];
// Returns the result (Vector of usize) and the index of the next
let decoded: (Vec<usize>, usize) = decode_ssz_list(&serialized_list, 10).unwrap();
// Check for correctness
assert_eq!(decoded.0, vec![15,15,15,15]);
assert_eq!(decoded.1, 46);
```
[<img src="https://img.shields.io/crates/v/eth2_ssz">](https://crates.io/crates/eth2_ssz)

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_attestations;
use state_processing::per_block_processing::{process_attestations, VerifySignatures};
use types::{Attestation, BeaconState, EthSpec};
#[derive(Debug, Clone, Deserialize)]
@ -38,7 +38,7 @@ impl<E: EthSpec> Case for OperationsAttestation<E> {
// Processing requires the epoch cache.
state.build_all_caches(spec).unwrap();
let result = process_attestations(&mut state, &[attestation], spec);
let result = process_attestations(&mut state, &[attestation], VerifySignatures::True, spec);
let mut result = result.and_then(|_| Ok(state));

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_attester_slashings;
use state_processing::per_block_processing::{process_attester_slashings, VerifySignatures};
use types::{AttesterSlashing, BeaconState, EthSpec};
#[derive(Debug, Clone, Deserialize)]
@ -38,8 +38,12 @@ impl<E: EthSpec> Case for OperationsAttesterSlashing<E> {
// Processing requires the epoch cache.
state.build_all_caches(&E::default_spec()).unwrap();
let result =
process_attester_slashings(&mut state, &[attester_slashing], &E::default_spec());
let result = process_attester_slashings(
&mut state,
&[attester_slashing],
VerifySignatures::True,
&E::default_spec(),
);
let mut result = result.and_then(|_| Ok(state));

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_block_header;
use state_processing::per_block_processing::{process_block_header, VerifySignatures};
use types::{BeaconBlock, BeaconState, EthSpec};
#[derive(Debug, Clone, Deserialize)]
@ -37,7 +37,9 @@ impl<E: EthSpec> Case for OperationsBlockHeader<E> {
// Processing requires the epoch cache.
state.build_all_caches(spec).unwrap();
let mut result = process_block_header(&mut state, &self.block, spec, true).map(|_| state);
let mut result =
process_block_header(&mut state, &self.block, None, VerifySignatures::True, spec)
.map(|_| state);
compare_beacon_state_results_without_caches(&mut result, &mut expected)
}

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_exits;
use state_processing::per_block_processing::{process_exits, VerifySignatures};
use types::{BeaconState, EthSpec, VoluntaryExit};
#[derive(Debug, Clone, Deserialize)]
@ -36,7 +36,12 @@ impl<E: EthSpec> Case for OperationsExit<E> {
// Exit processing requires the epoch cache.
state.build_all_caches(&E::default_spec()).unwrap();
let result = process_exits(&mut state, &[exit], &E::default_spec());
let result = process_exits(
&mut state,
&[exit],
VerifySignatures::True,
&E::default_spec(),
);
let mut result = result.and_then(|_| Ok(state));

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_proposer_slashings;
use state_processing::per_block_processing::{process_proposer_slashings, VerifySignatures};
use types::{BeaconState, EthSpec, ProposerSlashing};
#[derive(Debug, Clone, Deserialize)]
@ -36,8 +36,12 @@ impl<E: EthSpec> Case for OperationsProposerSlashing<E> {
// Processing requires the epoch cache.
state.build_all_caches(&E::default_spec()).unwrap();
let result =
process_proposer_slashings(&mut state, &[proposer_slashing], &E::default_spec());
let result = process_proposer_slashings(
&mut state,
&[proposer_slashing],
VerifySignatures::True,
&E::default_spec(),
);
let mut result = result.and_then(|_| Ok(state));

View File

@ -2,7 +2,7 @@ use super::*;
use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::per_block_processing::process_transfers;
use state_processing::per_block_processing::{process_transfers, VerifySignatures};
use types::{BeaconState, EthSpec, Transfer};
#[derive(Debug, Clone, Deserialize)]
@ -38,7 +38,7 @@ impl<E: EthSpec> Case for OperationsTransfer<E> {
let spec = E::default_spec();
let result = process_transfers(&mut state, &[transfer], &spec);
let result = process_transfers(&mut state, &[transfer], VerifySignatures::True, &spec);
let mut result = result.and_then(|_| Ok(state));

View File

@ -3,7 +3,7 @@ use crate::bls_setting::BlsSetting;
use crate::case_result::compare_beacon_state_results_without_caches;
use serde_derive::Deserialize;
use state_processing::{
per_block_processing, per_slot_processing, BlockInvalid, BlockProcessingError,
per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
};
use types::{BeaconBlock, BeaconState, EthSpec, RelativeEpoch};
@ -50,14 +50,18 @@ impl<E: EthSpec> Case for SanityBlocks<E> {
.build_committee_cache(RelativeEpoch::Current, spec)
.unwrap();
per_block_processing(&mut state, block, spec)?;
per_block_processing(
&mut state,
block,
None,
BlockSignatureStrategy::VerifyIndividual,
spec,
)?;
if block.state_root == state.canonical_root() {
Ok(())
} else {
Err(BlockProcessingError::Invalid(
BlockInvalid::StateRootMismatch,
))
Err(BlockProcessingError::StateRootMismatch)
}
})
.map(|_| state);