Use global pubkey cache for block processing (#849)

* Start updating types

* WIP

* Signature hacking

* Existing EF tests passing with fake_crypto

* Updates

* Delete outdated API spec

* The refactor continues

* It compiles

* WIP test fixes

* All release tests passing bar genesis state parsing

* Update and test YamlConfig

* Update to spec v0.10 compatible BLS

* Updates to BLS EF tests

* Add EF test for AggregateVerify

And delete unused hash2curve tests for uncompressed points

* Update EF tests to v0.10.1

* Use optional block root correctly in block proc

* Use genesis fork in deposit domain. All tests pass

* Cargo fmt

* Fast aggregate verify test

* Update REST API docs

* Cargo fmt

* Fix unused import

* Bump spec tags to v0.10.1

* Add `seconds_per_eth1_block` to chainspec

* Update to timestamp based eth1 voting scheme

* Return None from `get_votes_to_consider` if block cache is empty

* Handle overflows in `is_candidate_block`

* Revert to failing tests

* Fix eth1 data sets test

* Choose default vote according to spec

* Fix collect_valid_votes tests

* Fix `get_votes_to_consider` to choose all eligible blocks

* Uncomment winning_vote tests

* Add comments; remove unused code

* Reduce seconds_per_eth1_block for simulation

* Address review comments

* Add test for default vote case

* Fix logs

* Remove unused functions

* Meter default eth1 votes

* Fix comments

* Address review comments; remove unused dependency

* Add first attempt at attestation proc. re-write

* Add version 2 of attestation processing

* Minor fixes

* Add validator pubkey cache

* Make get_indexed_attestation take a committee

* Link signature processing into new attn verification

* First working version

* Ensure pubkey cache is updated

* Add more metrics, slight optimizations

* Clone committee cache during attestation processing

* Update shuffling cache during block processing

* Remove old commented-out code

* Fix shuffling cache insert bug

* Use indexed attestation in fork choice

* Restructure attn processing, add metrics

* Add more detailed metrics

* Tidy, fix failing tests

* Fix failing tests, tidy

* Disable/delete two outdated tests

* Add new Pubkeys struct to signature_sets

* Refactor with functional approach

* Update beacon chain

* Remove decompressed member from pubkey bytes

* Add hashmap for indices lookup

* Change `get_attesting_indices` to use Vec

* Fix failing test

* Tidy

* Add pubkey cache persistence file

* Add more comments

* Integrate persistence file into builder

* Add pubkey cache tests

* Add data_dir to beacon chain builder

* Remove Option in pubkey cache persistence file

* Ensure consistency between datadir/data_dir

* Fix failing network test

* Tidy

* Fix todos

* Improve tests

* Split up block processing metrics

* Tidy

* Refactor get_pubkey_from_state

* Remove commented-out code

* Add BeaconChain::validator_pubkey

* Use Option::filter

* Remove Box

* Comment out tests that fail due to hard-coded

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Co-authored-by: Michael Sproul <micsproul@gmail.com>
Co-authored-by: pawan <pawandhananjay@gmail.com>
This commit is contained in:
Paul Hauner 2020-04-06 14:13:19 +10:00 committed by GitHub
parent 6f3503b0e9
commit 54782d896c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 432 additions and 173 deletions

View File

@ -21,7 +21,7 @@ use state_processing::per_block_processing::errors::{
use state_processing::{
common::get_indexed_attestation, per_block_processing, per_slot_processing,
signature_sets::indexed_attestation_signature_set_from_pubkeys, BlockProcessingError,
BlockSignatureStrategy,
BlockSignatureStrategy, BlockSignatureVerifier,
};
use std::borrow::Cow;
use std::cmp::Ordering;
@ -74,7 +74,10 @@ pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32];
#[derive(Debug, PartialEq)]
pub enum BlockProcessingOutcome {
/// Block was valid and imported into the block graph.
Processed { block_root: Hash256 },
Processed {
block_root: Hash256,
},
InvalidSignature,
/// The parent block was unknown.
ParentUnknown {
parent: Hash256,
@ -86,7 +89,10 @@ pub enum BlockProcessingOutcome {
block_slot: Slot,
},
/// The block state_root does not match the generated state.
StateRootMismatch { block: Hash256, local: Hash256 },
StateRootMismatch {
block: Hash256,
local: Hash256,
},
/// The block was a genesis block, these blocks cannot be re-imported.
GenesisBlock,
/// The slot is finalized, no need to import.
@ -594,14 +600,44 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Returns the validator index (if any) for the given public key.
///
/// Information is retrieved from the present `beacon_state.validators`.
/// ## Notes
///
/// This query uses the `validator_pubkey_cache` which contains _all_ validators ever seen,
/// even if those validators aren't included in the head state. It is important to remember
/// that just because a validator exists here, it doesn't necessarily exist in all
/// `BeaconStates`.
///
/// ## Errors
///
/// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out.
pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Result<Option<usize>, Error> {
for (i, validator) in self.head()?.beacon_state.validators.iter().enumerate() {
if validator.pubkey == *pubkey {
return Ok(Some(i));
}
}
Ok(None)
let pubkey_cache = self
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
Ok(pubkey_cache.get_index(pubkey))
}
/// Looks up the public key (if any) for the validator at `validator_index`.
///
/// ## Notes
///
/// The lookup is served by the `validator_pubkey_cache`, which tracks every
/// validator ever observed — a hit here does not guarantee that the validator
/// is present in any particular `BeaconState`.
///
/// ## Errors
///
/// Errors if the read-lock on the `validator_pubkey_cache` cannot be acquired
/// before the timeout expires.
pub fn validator_pubkey(&self, validator_index: usize) -> Result<Option<PublicKey>, Error> {
let cache = self
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
Ok(cache.get(validator_index).cloned())
}
/// Returns the block canonical root of the current canonical chain at a given slot.
@ -1007,16 +1043,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
let pubkeys = indexed_attestation
.attesting_indices
.iter()
.map(|i| {
pubkey_cache
.get(*i as usize)
.ok_or_else(|| Error::ValidatorPubkeyCacheIncomplete(*i as usize))
})
.collect::<Result<Vec<&PublicKey>, Error>>()?;
let (fork, genesis_validators_root) = self
.canonical_head
.try_read_for(HEAD_LOCK_TIMEOUT)
@ -1029,7 +1055,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
})?;
let signature_set = indexed_attestation_signature_set_from_pubkeys(
pubkeys,
|validator_index| {
pubkey_cache
.get(validator_index)
.map(|pk| Cow::Borrowed(pk.as_point()))
},
&attestation.signature,
&indexed_attestation,
&fork,
@ -1047,6 +1077,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
metrics::stop_timer(signature_verification_timer);
drop(pubkey_cache);
if signature_is_valid {
// Provide the attestation to fork choice, updating the validator latest messages but
// _without_ finding and updating the head.
@ -1364,6 +1396,39 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self.log,
);
let signature_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_SIGNATURE);
let signature_verification_result = {
let validator_pubkey_cache = self
.validator_pubkey_cache
.try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
BlockSignatureVerifier::verify_entire_block(
&state,
|validator_index| {
// Disallow access to any validator pubkeys that are not in the current beacon
// state.
if validator_index < state.validators.len() {
validator_pubkey_cache
.get(validator_index)
.map(|pk| Cow::Borrowed(pk.as_point()))
} else {
None
}
},
&signed_block,
Some(block_root),
&self.spec,
)
};
if signature_verification_result.is_err() {
return Ok(BlockProcessingOutcome::InvalidSignature);
}
metrics::stop_timer(signature_timer);
let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
// Apply the received block to its parent state (which has been transitioned into this
@ -1372,7 +1437,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&mut state,
&signed_block,
Some(block_root),
BlockSignatureStrategy::VerifyBulk,
// Signatures were verified earlier in this function.
BlockSignatureStrategy::NoVerification,
&self.spec,
) {
Err(BlockProcessingError::BeaconStateError(e)) => {

View File

@ -58,6 +58,8 @@ pub enum BeaconChainError {
InvalidValidatorPubkeyBytes(DecodeError),
ValidatorPubkeyCacheIncomplete(usize),
SignatureSetError(state_processing::signature_sets::Error),
BlockSignatureVerifierError(state_processing::block_signature_verifier::Error),
DuplicateValidatorPublicKey,
ValidatorPubkeyCacheFileError(String),
}

View File

@ -32,6 +32,10 @@ lazy_static! {
"beacon_block_processing_committee_building_seconds",
"Time spent building/obtaining committees for block processing."
);
pub static ref BLOCK_PROCESSING_SIGNATURE: Result<Histogram> = try_create_histogram(
"beacon_block_processing_signature_seconds",
"Time spent doing signature verification for a block."
);
pub static ref BLOCK_PROCESSING_CORE: Result<Histogram> = try_create_histogram(
"beacon_block_processing_core_seconds",
"Time spent doing the core per_block_processing state processing."

View File

@ -1,10 +1,11 @@
use crate::errors::BeaconChainError;
use ssz::{Decode, DecodeError, Encode};
use std::collections::HashMap;
use std::convert::TryInto;
use std::fs::{File, OpenOptions};
use std::io::{self, Read, Write};
use std::path::Path;
use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes};
use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes, Validator};
/// Provides a mapping of `validator_index -> validator_publickey`.
///
@ -19,6 +20,7 @@ use types::{BeaconState, EthSpec, PublicKey, PublicKeyBytes};
/// copy of itself. This allows it to be restored between process invocations.
pub struct ValidatorPubkeyCache {
pubkeys: Vec<PublicKey>,
indices: HashMap<PublicKeyBytes, usize>,
persitence_file: ValidatorPubkeyCacheFile,
}
@ -47,6 +49,7 @@ impl ValidatorPubkeyCache {
let mut cache = Self {
persitence_file: ValidatorPubkeyCacheFile::create(persistence_path)?,
pubkeys: vec![],
indices: HashMap::new(),
};
cache.import_new_pubkeys(state)?;
@ -61,38 +64,57 @@ impl ValidatorPubkeyCache {
&mut self,
state: &BeaconState<T>,
) -> Result<(), BeaconChainError> {
state
.validators
.iter()
.skip(self.pubkeys.len())
.try_for_each(|v| {
let i = self.pubkeys.len();
if state.validators.len() > self.pubkeys.len() {
self.import(&state.validators[self.pubkeys.len()..])
} else {
Ok(())
}
}
// The item is written to disk (the persistence file) _before_ it is written into
// the local struct.
//
// This means that a pubkey cache read from disk will always be equivalent to or
// _later than_ the cache that was running in the previous instance of Lighthouse.
//
// The motivation behind this ordering is that we do not want to have states that
// reference a pubkey that is not in our cache. However, it's fine to have pubkeys
// that are never referenced in a state.
self.persitence_file.append(i, &v.pubkey)?;
/// Adds zero or more validators to `self`.
fn import(&mut self, validators: &[Validator]) -> Result<(), BeaconChainError> {
self.pubkeys.reserve(validators.len());
self.indices.reserve(validators.len());
self.pubkeys.push(
(&v.pubkey)
.try_into()
.map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
);
for v in validators.iter() {
let i = self.pubkeys.len();
Ok(())
})
if self.indices.contains_key(&v.pubkey) {
return Err(BeaconChainError::DuplicateValidatorPublicKey);
}
// The item is written to disk (the persistence file) _before_ it is written into
// the local struct.
//
// This means that a pubkey cache read from disk will always be equivalent to or
// _later than_ the cache that was running in the previous instance of Lighthouse.
//
// The motivation behind this ordering is that we do not want to have states that
// reference a pubkey that is not in our cache. However, it's fine to have pubkeys
// that are never referenced in a state.
self.persitence_file.append(i, &v.pubkey)?;
self.pubkeys.push(
(&v.pubkey)
.try_into()
.map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
);
self.indices.insert(v.pubkey.clone(), i);
}
Ok(())
}
/// Returns a reference to the public key of the validator at index `i`, or
/// `None` when no validator with that index has been imported.
pub fn get(&self, i: usize) -> Option<&PublicKey> {
if i < self.pubkeys.len() {
Some(&self.pubkeys[i])
} else {
None
}
}
/// Returns the index of the validator with the given `pubkey`, if that key
/// has been imported into the cache.
pub fn get_index(&self, pubkey: &PublicKeyBytes) -> Option<usize> {
self.indices.get(pubkey).map(|index| *index)
}
}
/// Allows for maintaining an on-disk copy of the `ValidatorPubkeyCache`. The file is raw SSZ bytes
@ -168,12 +190,14 @@ impl ValidatorPubkeyCacheFile {
let mut last = None;
let mut pubkeys = Vec::with_capacity(list.len());
let mut indices = HashMap::new();
for (index, pubkey) in list {
let expected = last.map(|n| n + 1);
if expected.map_or(true, |expected| index == expected) {
last = Some(index);
pubkeys.push((&pubkey).try_into().map_err(Error::SszError)?);
indices.insert(pubkey, index);
} else {
return Err(Error::InconsistentIndex {
expected,
@ -184,6 +208,7 @@ impl ValidatorPubkeyCacheFile {
Ok(ValidatorPubkeyCache {
pubkeys,
indices,
persitence_file: self,
})
}
@ -221,6 +246,16 @@ mod test {
if i < validator_count {
let pubkey = cache.get(i).expect("pubkey should be present");
assert_eq!(pubkey, &keypairs[i].pk, "pubkey should match cache");
let pubkey_bytes: PublicKeyBytes = pubkey.clone().into();
assert_eq!(
i,
cache
.get_index(&pubkey_bytes)
.expect("should resolve index"),
"index should match cache"
);
} else {
assert_eq!(
cache.get(i),

View File

@ -189,11 +189,17 @@ fn return_validator_duties<T: BeaconChainTypes>(
validator_pubkeys
.into_iter()
.map(|validator_pubkey| {
if let Some(validator_index) =
state.get_validator_index(&validator_pubkey).map_err(|e| {
ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e))
// The `beacon_chain` can return a validator index that does not exist in all states.
// Therefore, we must check to ensure that the validator index is valid for our
// `state`.
let validator_index = beacon_chain
.validator_index(&validator_pubkey)
.map_err(|e| {
ApiError::ServerError(format!("Unable to get validator index: {:?}", e))
})?
{
.filter(|i| *i < state.validators.len());
if let Some(validator_index) = validator_index {
let duties = state
.get_attestation_duties(validator_index, relative_epoch)
.map_err(|e| {

View File

@ -417,6 +417,7 @@ mod release_tests {
use super::*;
use state_processing::common::{get_attesting_indices, get_base_reward};
use std::collections::BTreeSet;
use std::iter::FromIterator;
use types::test_utils::*;
use types::*;
@ -857,11 +858,15 @@ mod release_tests {
let committee = state
.get_beacon_committee(att.data.slot, att.data.index)
.expect("should get beacon committee");
let att_indices = get_attesting_indices::<MainnetEthSpec>(
committee.committee,
&fresh_validators_bitlist,
)
.unwrap();
let att_indices = BTreeSet::from_iter(
get_attesting_indices::<MainnetEthSpec>(
committee.committee,
&fresh_validators_bitlist,
)
.unwrap(),
);
let fresh_indices = &att_indices - &seen_indices;
let rewards = fresh_indices

View File

@ -15,7 +15,6 @@ serde = "1.0.102"
serde_derive = "1.0.102"
lazy_static = "1.4.0"
serde_yaml = "0.8.11"
eth2_ssz = "0.1.2"
beacon_chain = { path = "../../beacon_node/beacon_chain" }
store = { path = "../../beacon_node/store" }
@ -24,6 +23,7 @@ store = { path = "../../beacon_node/store" }
bls = { path = "../utils/bls" }
integer-sqrt = "0.1.2"
itertools = "0.8.1"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../utils/ssz_types" }
merkle_proof = { path = "../utils/merkle_proof" }
log = "0.4.8"

View File

@ -1,4 +1,3 @@
use std::collections::BTreeSet;
use types::*;
/// Returns validator indices which participated in the attestation, sorted by increasing index.
@ -7,17 +6,20 @@ use types::*;
pub fn get_attesting_indices<T: EthSpec>(
committee: &[usize],
bitlist: &BitList<T::MaxValidatorsPerCommittee>,
) -> Result<BTreeSet<usize>, BeaconStateError> {
) -> Result<Vec<usize>, BeaconStateError> {
if bitlist.len() != committee.len() {
return Err(BeaconStateError::InvalidBitfield);
}
Ok(committee
.iter()
.enumerate()
.filter_map(|(i, validator_index)| match bitlist.get(i) {
Ok(true) => Some(*validator_index),
_ => None,
})
.collect())
let mut indices = Vec::with_capacity(bitlist.num_set_bits());
for (i, validator_index) in committee.iter().enumerate() {
if let Ok(true) = bitlist.get(i) {
indices.push(*validator_index)
}
}
indices.sort_unstable();
Ok(indices)
}

View File

@ -10,8 +10,8 @@ pub mod test_utils;
pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations};
pub use per_block_processing::{
errors::BlockProcessingError, per_block_processing, signature_sets, BlockSignatureStrategy,
VerifySignatures,
block_signature_verifier, errors::BlockProcessingError, per_block_processing, signature_sets,
BlockSignatureStrategy, BlockSignatureVerifier, VerifySignatures,
};
pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing};
pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError};

View File

@ -1,7 +1,7 @@
use crate::common::{initiate_validator_exit, slash_validator};
use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex};
use rayon::prelude::*;
use signature_sets::{block_proposal_signature_set, randao_signature_set};
use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set};
use std::convert::TryInto;
use tree_hash::TreeHash;
use types::*;
@ -21,7 +21,7 @@ pub use verify_deposit::{
pub use verify_exit::{verify_exit, verify_exit_time_independent_only};
pub mod block_processing_builder;
mod block_signature_verifier;
pub mod block_signature_verifier;
pub mod errors;
mod is_valid_indexed_attestation;
pub mod signature_sets;
@ -83,8 +83,14 @@ pub fn per_block_processing<T: EthSpec>(
BlockSignatureStrategy::VerifyBulk => {
// Verify all signatures in the block at once.
block_verify!(
BlockSignatureVerifier::verify_entire_block(state, signed_block, block_root, spec)
.is_ok(),
BlockSignatureVerifier::verify_entire_block(
state,
|i| get_pubkey_from_state(state, i),
signed_block,
block_root,
spec
)
.is_ok(),
BlockProcessingError::BulkSignatureVerificationFailed
);
VerifySignatures::False
@ -187,7 +193,14 @@ pub fn verify_block_signature<T: EthSpec>(
spec: &ChainSpec,
) -> Result<(), BlockOperationError<HeaderInvalid>> {
verify!(
block_proposal_signature_set(state, block, block_root, spec)?.is_valid(),
block_proposal_signature_set(
state,
|i| get_pubkey_from_state(state, i),
block,
block_root,
spec
)?
.is_valid(),
HeaderInvalid::ProposalSignatureInvalid
);
@ -207,7 +220,8 @@ pub fn process_randao<T: EthSpec>(
if verify_signatures.is_true() {
// Verify RANDAO reveal signature.
block_verify!(
randao_signature_set(state, block, spec)?.is_valid(),
randao_signature_set(state, |i| get_pubkey_from_state(state, i), block, spec)?
.is_valid(),
BlockProcessingError::RandaoSignatureInvalid
);
}

View File

@ -2,8 +2,9 @@ use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetRe
use crate::common::get_indexed_attestation;
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
use bls::{verify_signature_sets, SignatureSet};
use bls::{verify_signature_sets, G1Point, SignatureSet};
use rayon::prelude::*;
use std::borrow::Cow;
use types::{
BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, IndexedAttestation,
SignedBeaconBlock,
@ -46,23 +47,34 @@ impl From<BlockOperationError<AttestationInvalid>> for Error {
///
/// This allows for optimizations related to batch BLS operations (see the
/// `Self::verify_entire_block(..)` function).
pub struct BlockSignatureVerifier<'a, T: EthSpec> {
pub struct BlockSignatureVerifier<'a, T, F>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>> + Clone,
{
block: &'a SignedBeaconBlock<T>,
get_pubkey: F,
state: &'a BeaconState<T>,
spec: &'a ChainSpec,
sets: Vec<SignatureSet<'a>>,
}
impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
impl<'a, T, F> BlockSignatureVerifier<'a, T, F>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>> + Clone,
{
/// Create a new verifier without any included signatures. See the `include...` functions to
/// add signatures, and the `verify`
pub fn new(
state: &'a BeaconState<T>,
get_pubkey: F,
block: &'a SignedBeaconBlock<T>,
spec: &'a ChainSpec,
) -> Self {
Self {
block,
get_pubkey,
state,
spec,
sets: vec![],
@ -78,11 +90,12 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
/// See `Self::verify` for more detail.
pub fn verify_entire_block(
state: &'a BeaconState<T>,
get_pubkey: F,
block: &'a SignedBeaconBlock<T>,
block_root: Option<Hash256>,
spec: &'a ChainSpec,
) -> Result<()> {
let mut verifier = Self::new(state, block, spec);
let mut verifier = Self::new(state, get_pubkey, block, spec);
verifier.include_block_proposal(block_root)?;
verifier.include_randao_reveal()?;
@ -129,14 +142,25 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
/// Includes the block signature for `self.block` for verification.
fn include_block_proposal(&mut self, block_root: Option<Hash256>) -> Result<()> {
let set = block_proposal_signature_set(self.state, self.block, block_root, self.spec)?;
let set = block_proposal_signature_set(
self.state,
self.get_pubkey.clone(),
self.block,
block_root,
self.spec,
)?;
self.sets.push(set);
Ok(())
}
/// Includes the randao signature for `self.block` for verification.
fn include_randao_reveal(&mut self) -> Result<()> {
let set = randao_signature_set(self.state, &self.block.message, self.spec)?;
let set = randao_signature_set(
self.state,
self.get_pubkey.clone(),
&self.block.message,
self.spec,
)?;
self.sets.push(set);
Ok(())
}
@ -150,8 +174,12 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
.proposer_slashings
.iter()
.map(|proposer_slashing| {
let (set_1, set_2) =
proposer_slashing_signature_set(self.state, proposer_slashing, self.spec)?;
let (set_1, set_2) = proposer_slashing_signature_set(
self.state,
self.get_pubkey.clone(),
proposer_slashing,
self.spec,
)?;
Ok(vec![set_1, set_2])
})
.collect::<SignatureSetResult<Vec<Vec<SignatureSet>>>>()?
@ -171,8 +199,12 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
.attester_slashings
.iter()
.try_for_each(|attester_slashing| {
let (set_1, set_2) =
attester_slashing_signature_sets(&self.state, attester_slashing, &self.spec)?;
let (set_1, set_2) = attester_slashing_signature_sets(
&self.state,
self.get_pubkey.clone(),
attester_slashing,
&self.spec,
)?;
self.sets.push(set_1);
self.sets.push(set_2);
@ -197,6 +229,7 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
self.sets.push(indexed_attestation_signature_set(
&self.state,
self.get_pubkey.clone(),
&attestation.signature,
&indexed_attestation,
&self.spec,
@ -216,7 +249,7 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> {
.body
.voluntary_exits
.iter()
.map(|exit| exit_signature_set(&self.state, exit, &self.spec))
.map(|exit| exit_signature_set(&self.state, self.get_pubkey.clone(), exit, &self.spec))
.collect::<SignatureSetResult<_>>()?;
self.sets.append(&mut sets);

View File

@ -1,5 +1,5 @@
use super::errors::{BlockOperationError, IndexedAttestationInvalid as Invalid};
use super::signature_sets::indexed_attestation_signature_set;
use super::signature_sets::{get_pubkey_from_state, indexed_attestation_signature_set};
use crate::VerifySignatures;
use types::*;
@ -43,6 +43,7 @@ pub fn is_valid_indexed_attestation<T: EthSpec>(
verify!(
indexed_attestation_signature_set(
state,
|i| get_pubkey_from_state(state, i),
&indexed_attestation.signature,
&indexed_attestation,
spec

View File

@ -3,6 +3,7 @@
//!
//! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`.
use bls::{G1Point, G1Ref, SignatureSet, SignedMessage};
use ssz::DecodeError;
use std::borrow::Cow;
use std::convert::TryInto;
use tree_hash::TreeHash;
@ -18,7 +19,7 @@ pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
/// Signature verification failed. The block is invalid.
SignatureInvalid,
SignatureInvalid(DecodeError),
/// There was an error attempting to read from a `BeaconState`. Block
/// validity was not determined.
BeaconStateError(BeaconStateError),
@ -39,13 +40,36 @@ impl From<BeaconStateError> for Error {
}
}
/// A signature set that is valid if a block was signed by the expected block producer.
pub fn block_proposal_signature_set<'a, T: EthSpec>(
/// Helper function to get a public key from a `state`.
///
/// Fetches the validator at `validator_index` from `state.validators` and
/// converts its stored `PublicKeyBytes` into a `PublicKey`, returning `None`
/// when the index is out of range or the stored bytes do not decode.
pub fn get_pubkey_from_state<'a, T>(
state: &'a BeaconState<T>,
validator_index: usize,
) -> Option<Cow<'a, G1Point>>
where
T: EthSpec,
{
let validator = state.validators.get(validator_index)?;
let pubkey: PublicKey = (&validator.pubkey).try_into().ok()?;
Some(Cow::Owned(pubkey.into_point()))
}
/// A signature set that is valid if a block was signed by the expected block producer.
pub fn block_proposal_signature_set<'a, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
signed_block: &'a SignedBeaconBlock<T>,
block_root: Option<Hash256>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
) -> Result<SignatureSet<'a>>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let block = &signed_block.message;
let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
@ -68,17 +92,22 @@ pub fn block_proposal_signature_set<'a, T: EthSpec>(
Ok(SignatureSet::single(
&signed_block.signature,
validator_pubkey(state, proposer_index)?,
get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?,
message.as_bytes().to_vec(),
))
}
/// A signature set that is valid if the block proposers randao reveal signature is correct.
pub fn randao_signature_set<'a, T: EthSpec>(
pub fn randao_signature_set<'a, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
block: &'a BeaconBlock<T>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
) -> Result<SignatureSet<'a>>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
let domain = spec.get_domain(
@ -92,30 +121,37 @@ pub fn randao_signature_set<'a, T: EthSpec>(
Ok(SignatureSet::single(
&block.body.randao_reveal,
validator_pubkey(state, proposer_index)?,
get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?,
message.as_bytes().to_vec(),
))
}
/// Returns two signature sets, one for each `BlockHeader` included in the `ProposerSlashing`.
pub fn proposer_slashing_signature_set<'a, T: EthSpec>(
pub fn proposer_slashing_signature_set<'a, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
proposer_slashing: &'a ProposerSlashing,
spec: &'a ChainSpec,
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> {
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let proposer_index = proposer_slashing.signed_header_1.message.proposer_index as usize;
Ok((
block_header_signature_set(
state,
&proposer_slashing.signed_header_1,
validator_pubkey(state, proposer_index)?,
get_pubkey(proposer_index)
.ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?,
spec,
)?,
block_header_signature_set(
state,
&proposer_slashing.signed_header_2,
validator_pubkey(state, proposer_index)?,
get_pubkey(proposer_index)
.ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?,
spec,
)?,
))
@ -149,16 +185,24 @@ fn block_header_signature_set<'a, T: EthSpec>(
}
/// Returns the signature set for the given `indexed_attestation`.
pub fn indexed_attestation_signature_set<'a, 'b, T: EthSpec>(
pub fn indexed_attestation_signature_set<'a, 'b, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
signature: &'a AggregateSignature,
indexed_attestation: &'b IndexedAttestation<T>,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
) -> Result<SignatureSet<'a>>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let pubkeys = indexed_attestation
.attesting_indices
.into_iter()
.map(|&validator_idx| Ok(validator_pubkey(state, validator_idx as usize)?))
.map(|&validator_idx| {
Ok(get_pubkey(validator_idx as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_idx))?)
})
.collect::<Result<_>>()?;
let domain = spec.get_domain(
@ -176,18 +220,26 @@ pub fn indexed_attestation_signature_set<'a, 'b, T: EthSpec>(
/// Returns the signature set for the given `indexed_attestation` but pubkeys are supplied directly
/// instead of from the state.
pub fn indexed_attestation_signature_set_from_pubkeys<'a, 'b, T: EthSpec>(
pubkeys: Vec<&'a PublicKey>,
pub fn indexed_attestation_signature_set_from_pubkeys<'a, 'b, T, F>(
get_pubkey: F,
signature: &'a AggregateSignature,
indexed_attestation: &'b IndexedAttestation<T>,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
let pubkeys = pubkeys
) -> Result<SignatureSet<'a>>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let pubkeys = indexed_attestation
.attesting_indices
.into_iter()
.map(|pubkey| Cow::Borrowed(&pubkey.as_raw().point))
.collect();
.map(|&validator_idx| {
Ok(get_pubkey(validator_idx as usize)
.ok_or_else(|| Error::ValidatorUnknown(validator_idx))?)
})
.collect::<Result<_>>()?;
let domain = spec.get_domain(
indexed_attestation.data.target.epoch,
@ -203,20 +255,27 @@ pub fn indexed_attestation_signature_set_from_pubkeys<'a, 'b, T: EthSpec>(
}
/// Returns the signature set for the given `attester_slashing` and corresponding `pubkeys`.
pub fn attester_slashing_signature_sets<'a, T: EthSpec>(
pub fn attester_slashing_signature_sets<'a, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
attester_slashing: &'a AttesterSlashing<T>,
spec: &'a ChainSpec,
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> {
) -> Result<(SignatureSet<'a>, SignatureSet<'a>)>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>> + Clone,
{
Ok((
indexed_attestation_signature_set(
state,
get_pubkey.clone(),
&attester_slashing.attestation_1.signature,
&attester_slashing.attestation_1,
spec,
)?,
indexed_attestation_signature_set(
state,
get_pubkey,
&attester_slashing.attestation_2.signature,
&attester_slashing.attestation_2,
spec,
@ -256,11 +315,16 @@ pub fn deposit_signature_set<'a>(
/// Returns a signature set that is valid if the `SignedVoluntaryExit` was signed by the indicated
/// validator.
pub fn exit_signature_set<'a, T: EthSpec>(
pub fn exit_signature_set<'a, T, F>(
state: &'a BeaconState<T>,
get_pubkey: F,
signed_exit: &'a SignedVoluntaryExit,
spec: &'a ChainSpec,
) -> Result<SignatureSet<'a>> {
) -> Result<SignatureSet<'a>>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
{
let exit = &signed_exit.message;
let proposer_index = exit.validator_index as usize;
@ -275,30 +339,7 @@ pub fn exit_signature_set<'a, T: EthSpec>(
Ok(SignatureSet::single(
&signed_exit.signature,
validator_pubkey(state, proposer_index)?,
get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?,
message,
))
}
/// Maps a validator index to a `PublicKey`.
pub fn validator_pubkey<'a, T: EthSpec>(
state: &'a BeaconState<T>,
validator_index: usize,
) -> Result<Cow<'a, G1Point>> {
let pubkey_bytes = &state
.validators
.get(validator_index)
.ok_or_else(|| Error::ValidatorUnknown(validator_index as u64))?
.pubkey;
if let Some(pubkey) = pubkey_bytes.decompressed() {
Ok(Cow::Borrowed(&pubkey.as_raw().point))
} else {
pubkey_bytes
.try_into()
.map(|pubkey: PublicKey| Cow::Owned(pubkey.as_raw().point.clone()))
.map_err(|_| Error::BadBlsBytes {
validator_index: validator_index as u64,
})
}
}

View File

@ -1,5 +1,8 @@
use super::errors::{BlockOperationError, ExitInvalid};
use crate::per_block_processing::{signature_sets::exit_signature_set, VerifySignatures};
use crate::per_block_processing::{
signature_sets::{exit_signature_set, get_pubkey_from_state},
VerifySignatures,
};
use types::*;
type Result<T> = std::result::Result<T, BlockOperationError<ExitInvalid>>;
@ -84,7 +87,13 @@ fn verify_exit_parametric<T: EthSpec>(
if verify_signatures.is_true() {
verify!(
exit_signature_set(state, signed_exit, spec)?.is_valid(),
exit_signature_set(
state,
|i| get_pubkey_from_state(state, i),
signed_exit,
spec
)?
.is_valid(),
ExitInvalid::BadSignature
);
}

View File

@ -1,5 +1,5 @@
use super::errors::{BlockOperationError, ProposerSlashingInvalid as Invalid};
use super::signature_sets::proposer_slashing_signature_set;
use super::signature_sets::{get_pubkey_from_state, proposer_slashing_signature_set};
use crate::VerifySignatures;
use types::*;
@ -51,8 +51,12 @@ pub fn verify_proposer_slashing<T: EthSpec>(
);
if verify_signatures.is_true() {
let (signature_set_1, signature_set_2) =
proposer_slashing_signature_set(state, proposer_slashing, spec)?;
let (signature_set_1, signature_set_2) = proposer_slashing_signature_set(
state,
|i| get_pubkey_from_state(state, i),
proposer_slashing,
spec,
)?;
verify!(signature_set_1.is_valid(), Invalid::BadProposal1Signature);
verify!(signature_set_2.is_valid(), Invalid::BadProposal2Signature);
}

View File

@ -772,7 +772,6 @@ impl<T: EthSpec> BeaconState<T> {
self.update_pubkey_cache()?;
self.build_tree_hash_cache()?;
self.exit_cache.build(&self.validators, spec)?;
self.decompress_validator_pubkeys()?;
Ok(())
}
@ -959,23 +958,6 @@ impl<T: EthSpec> BeaconState<T> {
self.tree_hash_cache = None;
}
/// Iterate through all validators and decompress their public key, unless it has already been
/// decompressed.
///
/// Does not check the validity of already decompressed keys.
pub fn decompress_validator_pubkeys(&mut self) -> Result<(), Error> {
self.validators.iter_mut().try_for_each(|validator| {
if validator.pubkey.decompressed().is_none() {
validator
.pubkey
.decompress()
.map_err(Error::InvalidValidatorPubkey)
} else {
Ok(())
}
})
}
/// Clone the state whilst preserving only the selected caches.
pub fn clone_with(&self, config: CloneConfig) -> Self {
BeaconState {

View File

@ -22,6 +22,13 @@ impl FakeAggregatePublicKey {
Self::zero()
}
/// Returns a key whose compressed-byte representation is all zeros, paired
/// with a freshly constructed `G1Point`.
pub fn empty_signature() -> Self {
    let bytes = vec![0; BLS_PUBLIC_KEY_BYTE_SIZE];
    Self {
        point: G1Point::new(),
        bytes,
    }
}
pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
if bytes.len() != BLS_PUBLIC_KEY_BYTE_SIZE {
Err(DecodeError::InvalidByteLength {

View File

@ -85,6 +85,14 @@ impl FakePublicKey {
/// Returns `self`; this fake type acts as its own "raw" representation.
pub fn as_raw(&self) -> &Self {
self
}
/// Returns a reference to the internal `G1Point`.
pub fn as_point(&self) -> &G1Point {
&self.point
}
/// Consumes `self` and returns the internal `G1Point`.
pub fn into_point(self) -> G1Point {
self.point
}
}
impl fmt::Display for FakePublicKey {

View File

@ -92,7 +92,6 @@ macro_rules! bytes_struct {
#[derive(Clone)]
pub struct $name {
bytes: [u8; $byte_size],
decompressed: Option<$type>
}
};
($name: ident, $type: ty, $byte_size: expr, $small_name: expr) => {
@ -103,14 +102,12 @@ macro_rules! bytes_struct {
pub fn from_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
Ok(Self {
bytes: Self::get_bytes(bytes)?,
decompressed: None
})
}
pub fn empty() -> Self {
Self {
bytes: [0; $byte_size],
decompressed: None
}
}
@ -134,15 +131,6 @@ macro_rules! bytes_struct {
Ok(result)
}
}
pub fn decompress(&mut self) -> Result<(), ssz::DecodeError> {
self.decompressed = Some(<&Self as std::convert::TryInto<$type>>::try_into(self)?);
Ok(())
}
pub fn decompressed(&self) -> &Option<$type> {
&self.decompressed
}
}
impl std::fmt::Debug for $name {
@ -180,7 +168,44 @@ macro_rules! bytes_struct {
}
}
impl_ssz!($name, $byte_size, "$type");
// SSZ encoding for the fixed-size byte wrapper generated by `bytes_struct!`.
impl ssz::Encode for $name {
// The wrapper always serializes to exactly `$byte_size` bytes.
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_fixed_len() -> usize {
$byte_size
}
fn ssz_bytes_len(&self) -> usize {
$byte_size
}
// Append the stored bytes verbatim; fixed-size SSZ types carry no length prefix.
fn ssz_append(&self, buf: &mut Vec<u8>) {
buf.extend_from_slice(&self.bytes)
}
}
// SSZ decoding for the fixed-size byte wrapper generated by `bytes_struct!`.
impl ssz::Decode for $name {
// Mirrors the `Encode` impl: the wrapper is a fixed-size SSZ type.
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_fixed_len() -> usize {
$byte_size
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
let len = bytes.len();
let expected = <Self as ssz::Decode>::ssz_fixed_len();
// Reject any input that is not exactly `$byte_size` bytes long before
// delegating to the byte-level constructor.
if len != expected {
Err(ssz::DecodeError::InvalidByteLength { len, expected })
} else {
Self::from_bytes(bytes)
}
}
}
impl tree_hash::TreeHash for $name {
fn tree_hash_type() -> tree_hash::TreeHashType {

View File

@ -1,5 +1,5 @@
use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE};
use milagro_bls::PublicKey as RawPublicKey;
use milagro_bls::{G1Point, PublicKey as RawPublicKey};
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use serde_hex::{encode as hex_encode, PrefixedHexVisitor};
@ -24,11 +24,21 @@ impl PublicKey {
Self(raw)
}
/// Returns the underlying signature.
/// Returns a reference to the underlying raw public key.
pub fn as_raw(&self) -> &RawPublicKey {
&self.0
}
/// Returns a reference to the underlying public key point.
pub fn as_point(&self) -> &G1Point {
&self.0.point
}
/// Consumes `self` and returns the underlying public key point.
pub fn into_point(self) -> G1Point {
self.0.point
}
/// Returns the underlying point as compressed bytes.
///
/// Identical to `self.as_uncompressed_bytes()`.

View File

@ -22,6 +22,10 @@ fn eth2_testnet_config() -> Eth2TestnetConfig<MainnetEthSpec> {
Eth2TestnetConfig::hard_coded().expect("should decode hard_coded params")
}
/*
*
* TODO: disabled until hardcoded testnet config is updated for v0.11
*
mod setup_eth2_config {
use super::*;
@ -85,3 +89,4 @@ mod setup_eth2_config {
);
}
}
*/