Use bulk verification for sync_aggregate signature (#2415)

## Proposed Changes

Add the `sync_aggregate` from `BeaconBlock` to the bulk signature verifier for blocks. This necessitates a new signature set constructor for the sync aggregate, which differs from the others because sync aggregates are verified with [`eth2_fast_aggregate_verify`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/bls.md#eth2_fast_aggregate_verify), per [`process_sync_aggregate`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/beacon-chain.md#sync-aggregate-processing). I chose to return an optional signature set, with `None` representing the case where the signature is valid by virtue of being the point at infinity and therefore requires no further checking.
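
Roughly, a caller handles the optional set like this (a minimal sketch only; `batch` is an illustrative accumulator, the real entry points are `sync_aggregate_signature_set` and `include_sync_aggregate` in the diff below):

```rust
// Sketch: decide what to do with the optional signature set.
match sync_aggregate_signature_set(decompressor, sync_aggregate, slot, block_root, state, spec)? {
    // Point at infinity with no participation bits set: trivially valid per
    // `eth2_fast_aggregate_verify`, nothing further to check.
    None => {}
    // Otherwise defer the real BLS check to the batched (bulk) verification.
    Some(set) => batch.push(set),
}
```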

To "dogfood" the changes and avoid duplication, the consensus logic now uses the signature set approach as well whenever it is required to verify signatures (which should only be in testing, AFAIK). The EF tests pass with the code as it currently stands, but they failed before I carried the `eth2_fast_aggregate_verify` behaviour over into the new code, which is a good sign that the new path is actually exercised.
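
For reference, the consensus-level gating looks roughly like this (simplified from the `process_sync_aggregate` diff below, with surrounding setup trimmed):

```rust
if verify_signatures.is_true() {
    // Decompress participant pubkeys on the fly; production block import goes
    // through the bulk verifier and its `ValidatorPubkeyCache` instead.
    let decompressor = |pk: &PublicKeyBytes| pk.decompress().ok().map(Cow::Owned);
    let set = sync_aggregate_signature_set(
        decompressor, aggregate, state.slot(), previous_block_root, state, spec,
    )?;
    // `None` means the aggregate is the infinity point and is already valid.
    if set.map_or(false, |set| !set.verify()) {
        return Err(SyncAggregateInvalid::SignatureInvalid.into());
    }
}
```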

As a result of this change, Altair block processing should be a little faster and, importantly, we will no longer accidentally verify signatures when replaying blocks, e.g. from the database.
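
For context, the bulk path now folds the sync aggregate into the same batched BLS verification as every other block signature. A sketch (based on `BlockSignatureVerifier` in this diff; `get_pubkey` and `decompressor` are the closures built in `get_signature_verifier`):

```rust
let mut verifier = BlockSignatureVerifier::new(state, get_pubkey, decompressor, spec);
// `include_all_signatures` now also pulls in the sync aggregate
// (via `include_all_signatures_except_proposal` -> `include_sync_aggregate`).
verifier.include_all_signatures(block, Some(block_root))?;
// One batched BLS verification over every collected signature set.
verifier.verify()?;
```
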
Michael Sproul 2021-07-28 05:40:21 +00:00
parent 6e3ca48cb9
commit 923486f34c
8 changed files with 186 additions and 79 deletions


```diff
@@ -69,7 +69,8 @@ use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp
 use tree_hash::TreeHash;
 use types::{
     BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256,
-    InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+    InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
+    SignedBeaconBlockHeader, Slot,
 };
 
 /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed.
@@ -1394,22 +1395,31 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
     state: &'a BeaconState<T::EthSpec>,
     validator_pubkey_cache: &'a ValidatorPubkeyCache<T>,
     spec: &'a ChainSpec,
-) -> BlockSignatureVerifier<'a, T::EthSpec, impl Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone> {
-    BlockSignatureVerifier::new(
-        state,
-        move |validator_index| {
-            // Disallow access to any validator pubkeys that are not in the current beacon
-            // state.
-            if validator_index < state.validators().len() {
-                validator_pubkey_cache
-                    .get(validator_index)
-                    .map(|pk| Cow::Borrowed(pk))
-            } else {
-                None
-            }
-        },
-        spec,
-    )
+) -> BlockSignatureVerifier<
+    'a,
+    T::EthSpec,
+    impl Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
+    impl Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
+> {
+    let get_pubkey = move |validator_index| {
+        // Disallow access to any validator pubkeys that are not in the current beacon state.
+        if validator_index < state.validators().len() {
+            validator_pubkey_cache
+                .get(validator_index)
+                .map(Cow::Borrowed)
+        } else {
+            None
+        }
+    };
+    let decompressor = move |pk_bytes| {
+        // Map compressed pubkey to validator index.
+        let validator_index = validator_pubkey_cache.get_index(pk_bytes)?;
+        // Map validator index to pubkey (respecting guard on unknown validators).
+        get_pubkey(validator_index)
+    };
+    BlockSignatureVerifier::new(state, get_pubkey, decompressor, spec)
 }
 
 /// Verify that `header` was signed with a valid signature from its proposer.
```


```diff
@@ -2,6 +2,7 @@ use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid};
 use rayon::prelude::*;
 use safe_arith::{ArithError, SafeArith};
 use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set};
+use std::borrow::Cow;
 use tree_hash::TreeHash;
 use types::*;
@@ -102,6 +103,7 @@ pub fn per_block_processing<T: EthSpec>(
         BlockSignatureVerifier::verify_entire_block(
             state,
             |i| get_pubkey_from_state(state, i),
+            |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
             signed_block,
             block_root,
             spec
@@ -130,7 +132,13 @@ pub fn per_block_processing<T: EthSpec>(
     process_operations(state, block.body(), proposer_index, verify_signatures, spec)?;
 
     if let BeaconBlockRef::Altair(inner) = block {
-        process_sync_aggregate(state, &inner.body.sync_aggregate, proposer_index, spec)?;
+        process_sync_aggregate(
+            state,
+            &inner.body.sync_aggregate,
+            proposer_index,
+            verify_signatures,
+            spec,
+        )?;
     }
 
     Ok(())
```


```diff
@@ -1,55 +1,44 @@
 use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance};
 use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid};
+use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures};
 use safe_arith::SafeArith;
-use tree_hash::TreeHash;
+use std::borrow::Cow;
 use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR};
-use types::{BeaconState, ChainSpec, Domain, EthSpec, SigningData, SyncAggregate, Unsigned};
+use types::{BeaconState, ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned};
 
 pub fn process_sync_aggregate<T: EthSpec>(
     state: &mut BeaconState<T>,
     aggregate: &SyncAggregate<T>,
     proposer_index: u64,
+    verify_signatures: VerifySignatures,
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
-    // Verify sync committee aggregate signature signing over the previous slot block root
-    let previous_slot = state.slot().saturating_sub(1u64);
-
     let current_sync_committee = state.current_sync_committee()?.clone();
-    let committee_pubkeys = &current_sync_committee.pubkeys;
 
-    let participant_pubkeys = committee_pubkeys
-        .iter()
-        .zip(aggregate.sync_committee_bits.iter())
-        .flat_map(|(pubkey, bit)| {
-            if bit {
-                // FIXME(altair): accelerate pubkey decompression with a cache
-                Some(pubkey.decompress())
-            } else {
-                None
-            }
-        })
-        .collect::<Result<Vec<_>, _>>()
-        .map_err(|_| SyncAggregateInvalid::PubkeyInvalid)?;
+    // Verify sync committee aggregate signature signing over the previous slot block root
+    if verify_signatures.is_true() {
+        // This decompression could be avoided with a cache, but we're not likely
+        // to encounter this case in practice due to the use of pre-emptive signature
+        // verification (which uses the `ValidatorPubkeyCache`).
+        let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned);
 
-    let domain = spec.get_domain(
-        previous_slot.epoch(T::slots_per_epoch()),
-        Domain::SyncCommittee,
-        &state.fork(),
-        state.genesis_validators_root(),
-    );
+        // Check that the signature is over the previous block root.
+        let previous_slot = state.slot().saturating_sub(1u64);
+        let previous_block_root = *state.get_block_root(previous_slot)?;
 
-    let signing_root = SigningData {
-        object_root: *state.get_block_root(previous_slot)?,
-        domain,
-    }
-    .tree_hash_root();
+        let signature_set = sync_aggregate_signature_set(
+            decompressor,
+            aggregate,
+            state.slot(),
+            previous_block_root,
+            state,
+            spec,
+        )?;
 
-    let pubkey_refs = participant_pubkeys.iter().collect::<Vec<_>>();
-    if !aggregate
-        .sync_committee_signature
-        .eth2_fast_aggregate_verify(signing_root, &pubkey_refs)
-    {
-        return Err(SyncAggregateInvalid::SignatureInvalid.into());
-    }
+        // If signature set is `None` then the signature is valid (infinity).
+        if signature_set.map_or(false, |signature| !signature.verify()) {
+            return Err(SyncAggregateInvalid::SignatureInvalid.into());
+        }
+    }
 
     // Compute participant and proposer rewards
```


```diff
@@ -3,7 +3,7 @@
 use super::signature_sets::{Error as SignatureSetError, *};
 use crate::common::get_indexed_attestation;
 use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
-use bls::{verify_signature_sets, PublicKey, SignatureSet};
+use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet};
 use rayon::prelude::*;
 use std::borrow::Cow;
 use types::{
@@ -63,27 +63,36 @@ impl From<BlockOperationError<AttestationInvalid>> for Error {
 ///
 /// This allows for optimizations related to batch BLS operations (see the
 /// `Self::verify_entire_block(..)` function).
-pub struct BlockSignatureVerifier<'a, T, F>
+pub struct BlockSignatureVerifier<'a, T, F, D>
 where
     T: EthSpec,
     F: Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
+    D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
 {
     get_pubkey: F,
+    decompressor: D,
     state: &'a BeaconState<T>,
     spec: &'a ChainSpec,
     sets: Vec<SignatureSet<'a>>,
 }
 
-impl<'a, T, F> BlockSignatureVerifier<'a, T, F>
+impl<'a, T, F, D> BlockSignatureVerifier<'a, T, F, D>
 where
     T: EthSpec,
     F: Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
+    D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
 {
     /// Create a new verifier without any included signatures. See the `include...` functions to
     /// add signatures, and the `verify`
-    pub fn new(state: &'a BeaconState<T>, get_pubkey: F, spec: &'a ChainSpec) -> Self {
+    pub fn new(
+        state: &'a BeaconState<T>,
+        get_pubkey: F,
+        decompressor: D,
+        spec: &'a ChainSpec,
+    ) -> Self {
         Self {
             get_pubkey,
+            decompressor,
             state,
             spec,
             sets: vec![],
@@ -100,11 +109,12 @@ where
     pub fn verify_entire_block(
         state: &'a BeaconState<T>,
         get_pubkey: F,
+        decompressor: D,
         block: &'a SignedBeaconBlock<T>,
         block_root: Option<Hash256>,
         spec: &'a ChainSpec,
     ) -> Result<()> {
-        let mut verifier = Self::new(state, get_pubkey, spec);
+        let mut verifier = Self::new(state, get_pubkey, decompressor, spec);
         verifier.include_all_signatures(block, block_root)?;
         verifier.verify()
     }
@@ -146,12 +156,7 @@ where
         block_root: Option<Hash256>,
     ) -> Result<()> {
         self.include_block_proposal(block, block_root)?;
-        self.include_randao_reveal(block)?;
-        self.include_proposer_slashings(block)?;
-        self.include_attester_slashings(block)?;
-        self.include_attestations(block)?;
-        // Deposits are not included because they can legally have invalid signatures.
-        self.include_exits(block)?;
+        self.include_all_signatures_except_proposal(block)?;
         Ok(())
     }
@@ -168,6 +173,7 @@ where
         self.include_attestations(block)?;
         // Deposits are not included because they can legally have invalid signatures.
         self.include_exits(block)?;
+        self.include_sync_aggregate(block)?;
         Ok(())
     }
@@ -308,4 +314,21 @@ where
             Ok(())
         })
     }
+
+    /// Include the signature of the block's sync aggregate (if it exists) for verification.
+    pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock<T>) -> Result<()> {
+        if let Some(sync_aggregate) = block.message().body().sync_aggregate() {
+            if let Some(signature_set) = sync_aggregate_signature_set(
+                &self.decompressor,
+                sync_aggregate,
+                block.slot(),
+                block.parent_root(),
+                &self.state,
+                &self.spec,
+            )? {
+                self.sets.push(signature_set);
+            }
+        }
+        Ok(())
+    }
 }
```


```diff
@@ -11,7 +11,7 @@ use types::{
     DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation,
     ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof,
     SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot,
-    SignedVoluntaryExit, SigningData, SyncAggregatorSelectionData, Unsigned,
+    SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned,
 };
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -36,6 +36,8 @@ pub enum Error {
     /// The public keys supplied do not match the number of objects requiring keys. Block validity
     /// was not determined.
     MismatchedPublicKeyLen { pubkey_len: usize, other_len: usize },
+    /// Pubkey decompression failed. The block is invalid.
+    PublicKeyDecompressionFailed,
     /// The public key bytes stored in the `BeaconState` were not valid. This is a serious internal
     /// error.
     BadBlsBytes { validator_index: u64 },
@@ -517,3 +519,73 @@ where
     Ok(SignatureSet::single_pubkey(signature, pubkey, message))
 }
+
+/// Signature set verifier for a block's `sync_aggregate` (Altair and later).
+///
+/// The `slot` should be the slot of the block that the sync aggregate is included in, which may
+/// be different from `state.slot()`. The `block_root` should be the block root that the sync
+/// aggregate signs over. It's passed in rather than extracted from the `state` because when
+/// verifying a batch of blocks the `state` will not yet have had the blocks applied.
+///
+/// Returns `Ok(None)` in the case where `sync_aggregate` has 0 signatures. The spec
+/// uses a separate function `eth2_fast_aggregate_verify` for this, but we can equivalently
+/// check the exceptional case eagerly and do a `fast_aggregate_verify` in the case where the
+/// check fails (by returning `Some(signature_set)`).
+pub fn sync_aggregate_signature_set<'a, T, D>(
+    decompressor: D,
+    sync_aggregate: &'a SyncAggregate<T>,
+    slot: Slot,
+    block_root: Hash256,
+    state: &'a BeaconState<T>,
+    spec: &ChainSpec,
+) -> Result<Option<SignatureSet<'a>>>
+where
+    T: EthSpec,
+    D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
+{
+    // Allow the point at infinity to count as a signature for 0 validators as per
+    // `eth2_fast_aggregate_verify` from the spec.
+    if sync_aggregate.sync_committee_bits.is_zero()
+        && sync_aggregate.sync_committee_signature.is_infinity()
+    {
+        return Ok(None);
+    }
+
+    let committee_pubkeys = &state
+        .get_built_sync_committee(slot.epoch(T::slots_per_epoch()), spec)?
+        .pubkeys;
+
+    let participant_pubkeys = committee_pubkeys
+        .iter()
+        .zip(sync_aggregate.sync_committee_bits.iter())
+        .filter_map(|(pubkey, bit)| {
+            if bit {
+                Some(decompressor(pubkey))
+            } else {
+                None
+            }
+        })
+        .collect::<Option<Vec<_>>>()
+        .ok_or(Error::PublicKeyDecompressionFailed)?;
+
+    let previous_slot = slot.saturating_sub(1u64);
+
+    let domain = spec.get_domain(
+        previous_slot.epoch(T::slots_per_epoch()),
+        Domain::SyncCommittee,
+        &state.fork(),
+        state.genesis_validators_root(),
+    );
+
+    let message = SigningData {
+        object_root: block_root,
+        domain,
+    }
+    .tree_hash_root();
+
+    Ok(Some(SignatureSet::multiple_pubkeys(
+        &sync_aggregate.sync_committee_signature,
+        participant_pubkeys,
+        message,
+    )))
+}
```


```diff
@@ -110,6 +110,11 @@ where
         self.point.is_none()
     }
 
+    /// Returns `true` if `self` is equal to the point at infinity.
+    pub fn is_infinity(&self) -> bool {
+        self.is_infinity
+    }
+
     /// Returns a reference to the underlying BLS point.
     pub(crate) fn point(&self) -> Option<&AggSig> {
         self.point.as_ref()
@@ -189,18 +194,6 @@ where
         }
     }
 
-    /// Wrapper to `fast_aggregate_verify` accepting the infinity signature when `pubkeys` is empty.
-    pub fn eth2_fast_aggregate_verify(
-        &self,
-        msg: Hash256,
-        pubkeys: &[&GenericPublicKey<Pub>],
-    ) -> bool {
-        if pubkeys.is_empty() && self.is_infinity {
-            return true;
-        }
-        self.fast_aggregate_verify(msg, pubkeys)
-    }
-
     /// Verify that `self` represents an aggregate signature where all `pubkeys` have signed their
     /// corresponding message in `msgs`.
     ///
```


```diff
@@ -34,6 +34,7 @@ macro_rules! test_suite {
             AggregateSignature::deserialize(&INFINITY_SIGNATURE).unwrap(),
             AggregateSignature::infinity(),
         );
+        assert!(AggregateSignature::infinity().is_infinity());
     }
 
     #[test]
@@ -297,6 +298,17 @@ macro_rules! test_suite {
             .assert_single_message_verify(true)
     }
 
+    /// Adding two infinity signatures should yield the infinity signature.
+    #[test]
+    fn add_two_infinity_signatures() {
+        let tester = AggregateSignatureTester::new_with_single_msg(1)
+            .infinity_sig()
+            .aggregate_infinity_sig();
+        assert!(tester.sig.is_infinity());
+        assert_eq!(tester.sig, AggregateSignature::infinity());
+        tester.assert_single_message_verify(false)
+    }
+
     /// The wrong signature should not verify.
     #[test]
     fn fast_aggregate_verify_wrong_signature() {
```


```diff
@@ -198,7 +198,7 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> {
         spec: &ChainSpec,
     ) -> Result<(), BlockProcessingError> {
         let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64;
-        process_sync_aggregate(state, self, proposer_index, spec)
+        process_sync_aggregate(state, self, proposer_index, VerifySignatures::True, spec)
     }
 }
```