Use bulk verification for sync_aggregate signature (#2415)

## Proposed Changes

Add the `sync_aggregate` from `BeaconBlock` to the bulk signature verifier for blocks. This requires a new signature set constructor for the sync aggregate, which differs from the others because sync aggregates are verified with [`eth2_fast_aggregate_verify`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/bls.md#eth2_fast_aggregate_verify), per [`process_sync_aggregate`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/beacon-chain.md#sync-aggregate-processing). I chose to return an optional signature set, with `None` representing the case where the signature is trivially valid because it is the point at infinity (and so requires no further checking).
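
For illustration, a minimal sketch of the caller-side contract (the constructor and error names are taken from this diff; `aggregate`, `slot`, `block_root`, `state` and `spec` are assumed surrounding context):

```rust
// Hypothetical call site: `sync_aggregate_signature_set` and the error variants
// come from this PR, everything else is assumed surrounding context.
let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned);
match sync_aggregate_signature_set(decompressor, aggregate, slot, block_root, state, spec)? {
    // Point at infinity with zero participants: valid by definition, nothing to verify.
    None => {}
    // Otherwise verify it like any other signature set (or batch it with others).
    Some(signature_set) => {
        if !signature_set.verify() {
            return Err(SyncAggregateInvalid::SignatureInvalid.into());
        }
    }
}
```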

To "dogfood" the changes and prevent duplication, the consensus logic now uses the signature set approach as well whenever it is required to verify signatures (which should only be in testing AFAIK). The EF tests pass with the code as it exists currently, but failed before I adapted the `eth2_fast_aggregate_verify` changes (which is good).
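
The key new abstraction is the "decompressor" closure passed to the signature set constructor. A condensed sketch of the two variants used in this PR (`validator_pubkey_cache` and `get_pubkey` are the surrounding context from `get_signature_verifier` in the diff below):

```rust
// Bulk path (beacon chain): look the pubkey up by its compressed bytes in the
// `ValidatorPubkeyCache`, so no BLS decompression is performed at all.
let cached_decompressor = |pk_bytes: &PublicKeyBytes| {
    let validator_index = validator_pubkey_cache.get_index(pk_bytes)?;
    // Reuse `get_pubkey` so the guard against unknown validators still applies.
    get_pubkey(validator_index)
};

// Consensus-logic path (mainly tests): no cache available, so decompress on the fly.
let on_the_fly_decompressor =
    |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned);
```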

As a result of this change, Altair block processing should be a little faster, and, importantly, we will no longer accidentally verify signatures when replaying blocks, e.g. when replaying them from the database.
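
Roughly, the bulk path now looks like this (a sketch only: `chain`, `state`, `validator_pubkey_cache`, `block` and `block_root` are assumed context, while `get_signature_verifier` is the helper updated in this diff):

```rust
// All block signatures (proposal, RANDAO, slashings, attestations, exits and now
// the sync aggregate) are accumulated as `SignatureSet`s and verified in a single
// batched BLS operation instead of being checked one by one.
let mut verifier = get_signature_verifier(&state, &validator_pubkey_cache, &chain.spec);
verifier.include_all_signatures(block, Some(block_root))?;
verifier.verify()?;
```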
Michael Sproul 2021-07-28 05:40:21 +00:00
parent 6e3ca48cb9
commit 923486f34c
8 changed files with 186 additions and 79 deletions

---

@@ -69,7 +69,8 @@ use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp
use tree_hash::TreeHash;
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256,
InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
SignedBeaconBlockHeader, Slot,
};
/// Maximum block slot number. Block with slots bigger than this constant will NOT be processed.
@@ -1394,22 +1395,31 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
state: &'a BeaconState<T::EthSpec>,
validator_pubkey_cache: &'a ValidatorPubkeyCache<T>,
spec: &'a ChainSpec,
) -> BlockSignatureVerifier<'a, T::EthSpec, impl Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone> {
BlockSignatureVerifier::new(
state,
move |validator_index| {
// Disallow access to any validator pubkeys that are not in the current beacon
// state.
if validator_index < state.validators().len() {
validator_pubkey_cache
.get(validator_index)
.map(|pk| Cow::Borrowed(pk))
} else {
None
}
},
spec,
)
) -> BlockSignatureVerifier<
'a,
T::EthSpec,
impl Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
impl Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
> {
let get_pubkey = move |validator_index| {
// Disallow access to any validator pubkeys that are not in the current beacon state.
if validator_index < state.validators().len() {
validator_pubkey_cache
.get(validator_index)
.map(Cow::Borrowed)
} else {
None
}
};
let decompressor = move |pk_bytes| {
// Map compressed pubkey to validator index.
let validator_index = validator_pubkey_cache.get_index(pk_bytes)?;
// Map validator index to pubkey (respecting guard on unknown validators).
get_pubkey(validator_index)
};
BlockSignatureVerifier::new(state, get_pubkey, decompressor, spec)
}
/// Verify that `header` was signed with a valid signature from its proposer.

---

@@ -2,6 +2,7 @@ use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid};
use rayon::prelude::*;
use safe_arith::{ArithError, SafeArith};
use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set};
use std::borrow::Cow;
use tree_hash::TreeHash;
use types::*;
@@ -102,6 +103,7 @@ pub fn per_block_processing<T: EthSpec>(
BlockSignatureVerifier::verify_entire_block(
state,
|i| get_pubkey_from_state(state, i),
|pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
signed_block,
block_root,
spec
@@ -130,7 +132,13 @@
process_operations(state, block.body(), proposer_index, verify_signatures, spec)?;
if let BeaconBlockRef::Altair(inner) = block {
process_sync_aggregate(state, &inner.body.sync_aggregate, proposer_index, spec)?;
process_sync_aggregate(
state,
&inner.body.sync_aggregate,
proposer_index,
verify_signatures,
spec,
)?;
}
Ok(())

---

@@ -1,55 +1,44 @@
use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance};
use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid};
use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures};
use safe_arith::SafeArith;
use tree_hash::TreeHash;
use std::borrow::Cow;
use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR};
use types::{BeaconState, ChainSpec, Domain, EthSpec, SigningData, SyncAggregate, Unsigned};
use types::{BeaconState, ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned};
pub fn process_sync_aggregate<T: EthSpec>(
state: &mut BeaconState<T>,
aggregate: &SyncAggregate<T>,
proposer_index: u64,
verify_signatures: VerifySignatures,
spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
// Verify sync committee aggregate signature signing over the previous slot block root
let previous_slot = state.slot().saturating_sub(1u64);
let current_sync_committee = state.current_sync_committee()?.clone();
let committee_pubkeys = &current_sync_committee.pubkeys;
let participant_pubkeys = committee_pubkeys
.iter()
.zip(aggregate.sync_committee_bits.iter())
.flat_map(|(pubkey, bit)| {
if bit {
// FIXME(altair): accelerate pubkey decompression with a cache
Some(pubkey.decompress())
} else {
None
}
})
.collect::<Result<Vec<_>, _>>()
.map_err(|_| SyncAggregateInvalid::PubkeyInvalid)?;
// Verify sync committee aggregate signature signing over the previous slot block root
if verify_signatures.is_true() {
// This decompression could be avoided with a cache, but we're not likely
// to encounter this case in practice due to the use of pre-emptive signature
// verification (which uses the `ValidatorPubkeyCache`).
let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned);
let domain = spec.get_domain(
previous_slot.epoch(T::slots_per_epoch()),
Domain::SyncCommittee,
&state.fork(),
state.genesis_validators_root(),
);
// Check that the signature is over the previous block root.
let previous_slot = state.slot().saturating_sub(1u64);
let previous_block_root = *state.get_block_root(previous_slot)?;
let signing_root = SigningData {
object_root: *state.get_block_root(previous_slot)?,
domain,
}
.tree_hash_root();
let signature_set = sync_aggregate_signature_set(
decompressor,
aggregate,
state.slot(),
previous_block_root,
state,
spec,
)?;
let pubkey_refs = participant_pubkeys.iter().collect::<Vec<_>>();
if !aggregate
.sync_committee_signature
.eth2_fast_aggregate_verify(signing_root, &pubkey_refs)
{
return Err(SyncAggregateInvalid::SignatureInvalid.into());
// If signature set is `None` then the signature is valid (infinity).
if signature_set.map_or(false, |signature| !signature.verify()) {
return Err(SyncAggregateInvalid::SignatureInvalid.into());
}
}
// Compute participant and proposer rewards

---

@@ -3,7 +3,7 @@
use super::signature_sets::{Error as SignatureSetError, *};
use crate::common::get_indexed_attestation;
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
use bls::{verify_signature_sets, PublicKey, SignatureSet};
use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet};
use rayon::prelude::*;
use std::borrow::Cow;
use types::{
@@ -63,27 +63,36 @@ impl From<BlockOperationError<AttestationInvalid>> for Error {
///
/// This allows for optimizations related to batch BLS operations (see the
/// `Self::verify_entire_block(..)` function).
pub struct BlockSignatureVerifier<'a, T, F>
pub struct BlockSignatureVerifier<'a, T, F, D>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
{
get_pubkey: F,
decompressor: D,
state: &'a BeaconState<T>,
spec: &'a ChainSpec,
sets: Vec<SignatureSet<'a>>,
}
impl<'a, T, F> BlockSignatureVerifier<'a, T, F>
impl<'a, T, F, D> BlockSignatureVerifier<'a, T, F, D>
where
T: EthSpec,
F: Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone,
D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
{
/// Create a new verifier without any included signatures. See the `include...` functions to
/// add signatures, and the `verify`
pub fn new(state: &'a BeaconState<T>, get_pubkey: F, spec: &'a ChainSpec) -> Self {
pub fn new(
state: &'a BeaconState<T>,
get_pubkey: F,
decompressor: D,
spec: &'a ChainSpec,
) -> Self {
Self {
get_pubkey,
decompressor,
state,
spec,
sets: vec![],
@@ -100,11 +109,12 @@ where
pub fn verify_entire_block(
state: &'a BeaconState<T>,
get_pubkey: F,
decompressor: D,
block: &'a SignedBeaconBlock<T>,
block_root: Option<Hash256>,
spec: &'a ChainSpec,
) -> Result<()> {
let mut verifier = Self::new(state, get_pubkey, spec);
let mut verifier = Self::new(state, get_pubkey, decompressor, spec);
verifier.include_all_signatures(block, block_root)?;
verifier.verify()
}
@@ -146,12 +156,7 @@ where
block_root: Option<Hash256>,
) -> Result<()> {
self.include_block_proposal(block, block_root)?;
self.include_randao_reveal(block)?;
self.include_proposer_slashings(block)?;
self.include_attester_slashings(block)?;
self.include_attestations(block)?;
// Deposits are not included because they can legally have invalid signatures.
self.include_exits(block)?;
self.include_all_signatures_except_proposal(block)?;
Ok(())
}
@@ -168,6 +173,7 @@ where
self.include_attestations(block)?;
// Deposits are not included because they can legally have invalid signatures.
self.include_exits(block)?;
self.include_sync_aggregate(block)?;
Ok(())
}
@@ -308,4 +314,21 @@ where
Ok(())
})
}
/// Include the signature of the block's sync aggregate (if it exists) for verification.
pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock<T>) -> Result<()> {
if let Some(sync_aggregate) = block.message().body().sync_aggregate() {
if let Some(signature_set) = sync_aggregate_signature_set(
&self.decompressor,
sync_aggregate,
block.slot(),
block.parent_root(),
&self.state,
&self.spec,
)? {
self.sets.push(signature_set);
}
}
Ok(())
}
}

---

@@ -11,7 +11,7 @@ use types::{
DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation,
ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof,
SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot,
SignedVoluntaryExit, SigningData, SyncAggregatorSelectionData, Unsigned,
SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned,
};
pub type Result<T> = std::result::Result<T, Error>;
@@ -36,6 +36,8 @@ pub enum Error {
/// The public keys supplied do not match the number of objects requiring keys. Block validity
/// was not determined.
MismatchedPublicKeyLen { pubkey_len: usize, other_len: usize },
/// Pubkey decompression failed. The block is invalid.
PublicKeyDecompressionFailed,
/// The public key bytes stored in the `BeaconState` were not valid. This is a serious internal
/// error.
BadBlsBytes { validator_index: u64 },
@@ -517,3 +519,73 @@ where
Ok(SignatureSet::single_pubkey(signature, pubkey, message))
}
/// Signature set verifier for a block's `sync_aggregate` (Altair and later).
///
/// The `slot` should be the slot of the block that the sync aggregate is included in, which may be
/// different from `state.slot()`. The `block_root` should be the block root that the sync aggregate
/// signs over. It's passed in rather than extracted from the `state` because when verifying a batch
/// of blocks the `state` will not yet have had the blocks applied.
///
/// Returns `Ok(None)` in the case where `sync_aggregate` has 0 signatures. The spec
/// uses a separate function `eth2_fast_aggregate_verify` for this, but we can equivalently
/// check the exceptional case eagerly and do a `fast_aggregate_verify` in the case where the
/// check fails (by returning `Some(signature_set)`).
pub fn sync_aggregate_signature_set<'a, T, D>(
decompressor: D,
sync_aggregate: &'a SyncAggregate<T>,
slot: Slot,
block_root: Hash256,
state: &'a BeaconState<T>,
spec: &ChainSpec,
) -> Result<Option<SignatureSet<'a>>>
where
T: EthSpec,
D: Fn(&'a PublicKeyBytes) -> Option<Cow<'a, PublicKey>>,
{
// Allow the point at infinity to count as a signature for 0 validators as per
// `eth2_fast_aggregate_verify` from the spec.
if sync_aggregate.sync_committee_bits.is_zero()
&& sync_aggregate.sync_committee_signature.is_infinity()
{
return Ok(None);
}
let committee_pubkeys = &state
.get_built_sync_committee(slot.epoch(T::slots_per_epoch()), spec)?
.pubkeys;
let participant_pubkeys = committee_pubkeys
.iter()
.zip(sync_aggregate.sync_committee_bits.iter())
.filter_map(|(pubkey, bit)| {
if bit {
Some(decompressor(pubkey))
} else {
None
}
})
.collect::<Option<Vec<_>>>()
.ok_or(Error::PublicKeyDecompressionFailed)?;
let previous_slot = slot.saturating_sub(1u64);
let domain = spec.get_domain(
previous_slot.epoch(T::slots_per_epoch()),
Domain::SyncCommittee,
&state.fork(),
state.genesis_validators_root(),
);
let message = SigningData {
object_root: block_root,
domain,
}
.tree_hash_root();
Ok(Some(SignatureSet::multiple_pubkeys(
&sync_aggregate.sync_committee_signature,
participant_pubkeys,
message,
)))
}

---

@@ -110,6 +110,11 @@ where
self.point.is_none()
}
/// Returns `true` if `self` is equal to the point at infinity.
pub fn is_infinity(&self) -> bool {
self.is_infinity
}
/// Returns a reference to the underlying BLS point.
pub(crate) fn point(&self) -> Option<&AggSig> {
self.point.as_ref()
@@ -189,18 +194,6 @@ where
}
}
/// Wrapper to `fast_aggregate_verify` accepting the infinity signature when `pubkeys` is empty.
pub fn eth2_fast_aggregate_verify(
&self,
msg: Hash256,
pubkeys: &[&GenericPublicKey<Pub>],
) -> bool {
if pubkeys.is_empty() && self.is_infinity {
return true;
}
self.fast_aggregate_verify(msg, pubkeys)
}
/// Verify that `self` represents an aggregate signature where all `pubkeys` have signed their
/// corresponding message in `msgs`.
///

---

@@ -34,6 +34,7 @@ macro_rules! test_suite {
AggregateSignature::deserialize(&INFINITY_SIGNATURE).unwrap(),
AggregateSignature::infinity(),
);
assert!(AggregateSignature::infinity().is_infinity());
}
#[test]
@@ -297,6 +298,17 @@ macro_rules! test_suite {
.assert_single_message_verify(true)
}
/// Adding two infinity signatures should yield the infinity signature.
#[test]
fn add_two_infinity_signatures() {
let tester = AggregateSignatureTester::new_with_single_msg(1)
.infinity_sig()
.aggregate_infinity_sig();
assert!(tester.sig.is_infinity());
assert_eq!(tester.sig, AggregateSignature::infinity());
tester.assert_single_message_verify(false)
}
/// The wrong signature should not verify.
#[test]
fn fast_aggregate_verify_wrong_signature() {

---

@@ -198,7 +198,7 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> {
spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64;
process_sync_aggregate(state, self, proposer_index, spec)
process_sync_aggregate(state, self, proposer_index, VerifySignatures::True, spec)
}
}