diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 72b5b58bb..beabd898d 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -2313,32 +2313,74 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     }
 
     /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network.
-    pub fn verify_bls_to_execution_change_for_gossip(
+    pub fn verify_bls_to_execution_change_for_http_api(
         &self,
         bls_to_execution_change: SignedBlsToExecutionChange,
     ) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
-        let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
-        if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
-            // Disallow BLS to execution changes prior to the Capella fork.
-            return Err(Error::BlsToExecutionChangeBadFork(current_fork));
+        // Before checking the gossip duplicate filter, check that no prior change is already
+        // in our op pool. Ignore these messages: do not gossip, do not try to override the pool.
+        match self
+            .op_pool
+            .bls_to_execution_change_in_pool_equals(&bls_to_execution_change)
+        {
+            Some(true) => return Ok(ObservationOutcome::AlreadyKnown),
+            Some(false) => return Err(Error::BlsToExecutionConflictsWithPool),
+            None => (),
         }
 
-        let wall_clock_state = self.wall_clock_state()?;
+        // Use the head state to save advancing to the wall-clock slot unnecessarily. The message
+        // is signed with respect to the genesis fork version, and the slot check for gossip is
+        // applied separately. This `Arc` clone of the head is nice and cheap.
+        let head_snapshot = self.head().snapshot;
+        let head_state = &head_snapshot.beacon_state;
 
         Ok(self
             .observed_bls_to_execution_changes
             .lock()
-            .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
+            .verify_and_observe(bls_to_execution_change, head_state, &self.spec)?)
+    }
+
+    /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network.
+    pub fn verify_bls_to_execution_change_for_gossip(
+        &self,
+        bls_to_execution_change: SignedBlsToExecutionChange,
+    ) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
+        // Ignore BLS to execution changes on gossip prior to Capella.
+        if !self.current_slot_is_post_capella()? {
+            return Err(Error::BlsToExecutionPriorToCapella);
+        }
+        self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change)
+            .or_else(|e| {
+                // On gossip treat conflicts the same as duplicates [IGNORE].
+                match e {
+                    Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown),
+                    e => Err(e),
+                }
+            })
+    }
+
+    /// Check if the current slot is greater than or equal to the Capella fork epoch.
+    pub fn current_slot_is_post_capella(&self) -> Result<bool, Error> {
+        let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
+        if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
+            Ok(false)
+        } else {
+            Ok(true)
+        }
     }
 
     /// Import a BLS to execution change to the op pool.
+    ///
+    /// Return `true` if the change was added to the pool.
    pub fn import_bls_to_execution_change(
        &self,
        bls_to_execution_change: SigVerifiedOp<SignedBlsToExecutionChange, T::EthSpec>,
-    ) {
+    ) -> bool {
        if self.eth1_chain.is_some() {
            self.op_pool
-                .insert_bls_to_execution_change(bls_to_execution_change);
+                .insert_bls_to_execution_change(bls_to_execution_change)
+        } else {
+            false
        }
    }
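The split between the two entry points is easiest to see from the caller's side. Below is a minimal sketch of the intended HTTP API flow, assuming a `chain: Arc<BeaconChain<T>>` in scope and a hypothetical `publish` helper standing in for the pubsub send that is wired up for real in `http_api/src/lib.rs` later in this diff:

```rust
// Sketch only: mirrors the handler added in `http_api/src/lib.rs` below.
match chain.verify_bls_to_execution_change_for_http_api(address_change)? {
    ObservationOutcome::New(verified_address_change) => {
        // Gossiping a change before the Capella fork would get it dropped by
        // peers, so only publish once the wall-clock slot is post-Capella.
        if chain.current_slot_is_post_capella()? {
            publish(verified_address_change.as_inner().clone()); // hypothetical helper
        }
        // Import regardless, so the change is ready for the first Capella blocks.
        let _imported = chain.import_bls_to_execution_change(verified_address_change);
    }
    // Duplicate of a change already pooled: nothing to do.
    ObservationOutcome::AlreadyKnown => {}
}
```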
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index fcb2c53a4..c138695a7 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -206,7 +206,8 @@ pub enum BeaconChainError {
     MissingPersistedForkChoice,
     CommitteePromiseFailed(oneshot_broadcast::Error),
     MaxCommitteePromises(usize),
-    BlsToExecutionChangeBadFork(ForkName),
+    BlsToExecutionPriorToCapella,
+    BlsToExecutionConflictsWithPool,
     InconsistentFork(InconsistentFork),
     ProposerHeadForkChoiceError(fork_choice::Error),
     BlobsUnavailable,
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index f75f911d5..24688af73 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -149,6 +149,7 @@ pub struct Builder<T: BeaconChainTypes> {
     eth_spec_instance: T::EthSpec,
     spec: Option<ChainSpec>,
     validator_keypairs: Option<Vec<Keypair>>,
+    withdrawal_keypairs: Vec<Option<Keypair>>,
     chain_config: Option<ChainConfig>,
     store_config: Option<StoreConfig>,
     #[allow(clippy::type_complexity)]
@@ -171,6 +172,17 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
             .clone()
             .expect("cannot build without validator keypairs");
 
+        // For the interop genesis state we know that the withdrawal credentials are set equal
+        // to the validator keypairs. Check for any manually initialised credentials.
+        assert!(
+            self.withdrawal_keypairs.is_empty(),
+            "withdrawal credentials are ignored by fresh_ephemeral_store"
+        );
+        self.withdrawal_keypairs = validator_keypairs
+            .iter()
+            .map(|kp| Some(kp.clone()))
+            .collect();
+
         let store = Arc::new(
             HotColdDB::open_ephemeral(
                 self.store_config.clone().unwrap_or_default(),
@@ -283,6 +295,7 @@
             eth_spec_instance,
             spec: None,
             validator_keypairs: None,
+            withdrawal_keypairs: vec![],
             chain_config: None,
             store_config: None,
             store: None,
@@ -545,6 +558,7 @@
             spec: chain.spec.clone(),
             chain: Arc::new(chain),
             validator_keypairs,
+            withdrawal_keypairs: self.withdrawal_keypairs,
             shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)),
             runtime: self.runtime,
             mock_execution_layer: self.mock_execution_layer,
@@ -560,6 +574,12 @@
 /// Used for testing.
 pub struct BeaconChainHarness<T: BeaconChainTypes> {
     pub validator_keypairs: Vec<Keypair>,
+    /// Optional BLS withdrawal keys for each validator.
+    ///
+    /// If a validator index is missing from this vec or their entry is `None` then either
+    /// no BLS withdrawal key was set for them (they had an address from genesis) or the test
+    /// initializer neglected to set this field.
+    pub withdrawal_keypairs: Vec<Option<Keypair>>,
 
     pub chain: Arc<BeaconChain<T>>,
     pub spec: ChainSpec,
@@ -1471,6 +1491,44 @@
             .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
     }
 
+    pub fn make_bls_to_execution_change(
+        &self,
+        validator_index: u64,
+        address: Address,
+    ) -> SignedBlsToExecutionChange {
+        let keypair = self.get_withdrawal_keypair(validator_index);
+        self.make_bls_to_execution_change_with_keys(
+            validator_index,
+            address,
+            &keypair.pk,
+            &keypair.sk,
+        )
+    }
+
+    pub fn make_bls_to_execution_change_with_keys(
+        &self,
+        validator_index: u64,
+        address: Address,
+        pubkey: &PublicKey,
+        secret_key: &SecretKey,
+    ) -> SignedBlsToExecutionChange {
+        let genesis_validators_root = self.chain.genesis_validators_root;
+        BlsToExecutionChange {
+            validator_index,
+            from_bls_pubkey: pubkey.compress(),
+            to_execution_address: address,
+        }
+        .sign(secret_key, genesis_validators_root, &self.chain.spec)
+    }
+
+    pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair {
+        self.withdrawal_keypairs
+            .get(validator_index as usize)
+            .expect("BLS withdrawal key missing from harness")
+            .as_ref()
+            .expect("no withdrawal key for validator")
+    }
+
     pub fn add_voluntary_exit(
         &self,
         block: &mut BeaconBlock<E>,
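With `withdrawal_keypairs` pre-populated by `fresh_ephemeral_store`, a test can mint signed address changes in a couple of lines. A sketch, assuming a built `harness` and an arbitrary target address:

```rust
// Valid change: signed with validator 0's own BLS withdrawal key.
let valid = harness.make_bls_to_execution_change(0, Address::from_low_u64_be(42));

// Invalid change: validator 0's pubkey paired with validator 1's secret key,
// so signature verification must fail.
let wrong_key = harness.make_bls_to_execution_change_with_keys(
    0,
    Address::from_low_u64_be(42),
    &harness.get_withdrawal_keypair(0).pk,
    &harness.get_withdrawal_keypair(1).sk,
);
```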
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index e6d779707..7e63db421 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -1671,7 +1671,7 @@
         .and_then(
             |chain: Arc<BeaconChain<T>>,
              address_changes: Vec<SignedBlsToExecutionChange>,
-             #[allow(unused)] network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
              log: Logger| {
                 blocking_json_task(move || {
                     let mut failures = vec![];
@@ -1679,15 +1679,38 @@
                     for (index, address_change) in address_changes.into_iter().enumerate() {
                         let validator_index = address_change.message.validator_index;
 
-                        match chain.verify_bls_to_execution_change_for_gossip(address_change) {
+                        match chain.verify_bls_to_execution_change_for_http_api(address_change) {
                             Ok(ObservationOutcome::New(verified_address_change)) => {
-                                publish_pubsub_message(
-                                    &network_tx,
-                                    PubsubMessage::BlsToExecutionChange(Box::new(
-                                        verified_address_change.as_inner().clone(),
-                                    )),
-                                )?;
-                                chain.import_bls_to_execution_change(verified_address_change);
+                                let validator_index =
+                                    verified_address_change.as_inner().message.validator_index;
+                                let address = verified_address_change
+                                    .as_inner()
+                                    .message
+                                    .to_execution_address;
+
+                                // New to P2P *and* the op pool: gossip immediately if post-Capella.
+                                let publish = chain.current_slot_is_post_capella().unwrap_or(false);
+                                if publish {
+                                    publish_pubsub_message(
+                                        &network_tx,
+                                        PubsubMessage::BlsToExecutionChange(Box::new(
+                                            verified_address_change.as_inner().clone(),
+                                        )),
+                                    )?;
+                                }
+
+                                // Import to the op pool (may return `false` if there's a race).
+                                let imported =
+                                    chain.import_bls_to_execution_change(verified_address_change);
+
+                                info!(
+                                    log,
+                                    "Processed BLS to execution change";
+                                    "validator_index" => validator_index,
+                                    "address" => ?address,
+                                    "published" => publish,
+                                    "imported" => imported,
+                                );
                             }
                             Ok(ObservationOutcome::AlreadyKnown) => {
                                 debug!(
                                     log,
@@ -1697,11 +1720,12 @@
                                 );
                             }
                             Err(e) => {
-                                error!(
+                                warn!(
                                     log,
                                     "Invalid BLS to execution change";
                                     "validator_index" => validator_index,
-                                    "source" => "HTTP API",
+                                    "reason" => ?e,
+                                    "source" => "HTTP",
                                 );
                                 failures.push(api_types::Failure::new(
                                     index,
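From the client's side the endpoint behaves as follows. A sketch using the typed method added to `common/eth2/src/lib.rs` further down, assuming an async context, a reachable node behind `client`, and a `valid_change` built elsewhere:

```rust
// A fully-accepted batch returns 200 with an empty body.
client
    .post_beacon_pool_bls_to_execution_changes(&[valid_change])
    .await?;

// A batch with bad operations returns 400, surfaced as
// `Error::ServerIndexedMessage(IndexedErrorMessage { failures, .. })` with one
// `Failure { index, .. }` per rejected operation; valid operations in the same
// batch are still imported.
```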
diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs
index 942a1167c..eaaa4e864 100644
--- a/beacon_node/http_api/tests/fork_tests.rs
+++ b/beacon_node/http_api/tests/fork_tests.rs
@@ -1,8 +1,8 @@
 //! Tests for API behaviour across fork boundaries.
 use crate::common::*;
 use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig};
-use eth2::types::{StateId, SyncSubcommittee};
-use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot};
+use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
+use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot};
 
 type E = MinimalEthSpec;
 
@@ -12,6 +12,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec {
     spec
 }
 
+fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec {
+    let mut spec = E::default_spec();
+    spec.altair_fork_epoch = Some(Epoch::new(0));
+    spec.bellatrix_fork_epoch = Some(Epoch::new(0));
+    spec.capella_fork_epoch = Some(capella_fork_epoch);
+    spec
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn sync_committee_duties_across_fork() {
     let validator_count = E::sync_committee_size();
@@ -307,3 +315,171 @@ async fn sync_committee_indices_across_fork() {
         );
     }
 }
+
+/// Assert that an HTTP API error has the given status code and indexed errors for the given indices.
+fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec<usize>) {
+    let eth2::Error::ServerIndexedMessage(IndexedErrorMessage {
+        code,
+        failures,
+        ..
+    }) = error else {
+        panic!("wrong error, expected ServerIndexedMessage, got: {error:?}")
+    };
+    assert_eq!(code, status_code);
+    assert_eq!(failures.len(), indices.len());
+    for (index, failure) in indices.into_iter().zip(failures) {
+        assert_eq!(failure.index, index as u64);
+    }
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn bls_to_execution_changes_update_all_around_capella_fork() {
+    let validator_count = 128;
+    let fork_epoch = Epoch::new(2);
+    let spec = capella_spec(fork_epoch);
+    let max_bls_to_execution_changes = E::max_bls_to_execution_changes();
+    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count).await;
+    let harness = &tester.harness;
+    let client = &tester.client;
+
+    let all_validators = harness.get_all_validators();
+    let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::<Vec<_>>();
+
+    // Create a bunch of valid address changes.
+    let valid_address_changes = all_validators_u64
+        .iter()
+        .map(|&validator_index| {
+            harness.make_bls_to_execution_change(
+                validator_index,
+                Address::from_low_u64_be(validator_index),
+            )
+        })
+        .collect::<Vec<_>>();
+
+    // Address changes which conflict with `valid_address_changes` on the address chosen.
+    let conflicting_address_changes = all_validators_u64
+        .iter()
+        .map(|&validator_index| {
+            harness.make_bls_to_execution_change(
+                validator_index,
+                Address::from_low_u64_be(validator_index + 1),
+            )
+        })
+        .collect::<Vec<_>>();
+
+    // Address changes signed with the wrong key.
+    let wrong_key_address_changes = all_validators_u64
+        .iter()
+        .map(|&validator_index| {
+            // Use the correct pubkey.
+            let pubkey = &harness.get_withdrawal_keypair(validator_index).pk;
+            // And the wrong secret key.
+            let secret_key = &harness
+                .get_withdrawal_keypair((validator_index + 1) % validator_count as u64)
+                .sk;
+            harness.make_bls_to_execution_change_with_keys(
+                validator_index,
+                Address::from_low_u64_be(validator_index),
+                pubkey,
+                secret_key,
+            )
+        })
+        .collect::<Vec<_>>();
+
+    // Submit some changes before Capella. Just enough to fill two blocks.
+    let num_pre_capella = validator_count / 4;
+    let blocks_filled_pre_capella = 2;
+    assert_eq!(
+        num_pre_capella,
+        blocks_filled_pre_capella * max_bls_to_execution_changes
+    );
+
+    client
+        .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
+        .await
+        .unwrap();
+
+    // Conflicting changes for the same validators should all fail.
+    let error = client
+        .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella])
+        .await
+        .unwrap_err();
+    assert_server_indexed_error(error, 400, (0..num_pre_capella).collect());
+
+    // Re-submitting the same changes should be accepted.
+    client
+        .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella])
+        .await
+        .unwrap();
+
+    // Invalid changes signed with the wrong keys should all be rejected without affecting the
+    // seen indices filters (apply ALL of them).
+    let error = client
+        .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes)
+        .await
+        .unwrap_err();
+    assert_server_indexed_error(error, 400, all_validators.clone());
+
+    // Advance to right before Capella.
+    let capella_slot = fork_epoch.start_slot(E::slots_per_epoch());
+    harness.extend_to_slot(capella_slot - 1).await;
+    assert_eq!(harness.head_slot(), capella_slot - 1);
+
+    // Add Capella blocks which should be full of BLS to execution changes.
+    for i in 0..validator_count / max_bls_to_execution_changes {
+        let head_block_root = harness.extend_slots(1).await;
+        let head_block = harness
+            .chain
+            .get_block(&head_block_root)
+            .await
+            .unwrap()
+            .unwrap();
+
+        let bls_to_execution_changes = head_block
+            .message()
+            .body()
+            .bls_to_execution_changes()
+            .unwrap();
+
+        // Block should be full.
+        assert_eq!(
+            bls_to_execution_changes.len(),
+            max_bls_to_execution_changes,
+            "block not full on iteration {i}"
+        );
+
+        // Included changes should be the ones from `valid_address_changes` in any order.
+        for address_change in bls_to_execution_changes.iter() {
+            assert!(valid_address_changes.contains(address_change));
+        }
+
+        // After the initial 2 blocks, add the rest of the changes using a large request
+        // containing all of the valid, conflicting and invalid changes. Despite the invalid
+        // and duplicate messages, the new ones should still get picked up by the pool.
+        if i == blocks_filled_pre_capella - 1 {
+            let all_address_changes: Vec<_> = [
+                valid_address_changes.clone(),
+                conflicting_address_changes.clone(),
+                wrong_key_address_changes.clone(),
+            ]
+            .concat();
+
+            let error = client
+                .post_beacon_pool_bls_to_execution_changes(&all_address_changes)
+                .await
+                .unwrap_err();
+            assert_server_indexed_error(
+                error,
+                400,
+                (validator_count..3 * validator_count).collect(),
+            );
+        }
+    }
+
+    // Eventually all validators should have eth1 withdrawal credentials.
+    let head_state = harness.get_current_state();
+    for validator in head_state.validators() {
+        assert!(validator.has_eth1_withdrawal_credential(&spec));
+    }
+}
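The test's sizing is deliberate: under the minimal preset, where `MAX_BLS_TO_EXECUTION_CHANGES` is 16, the pre-fork submissions fill whole blocks exactly. Spelled out (the constant's value is an assumption from the minimal preset, not new logic):

```rust
let validator_count = 128;
let max_bls_to_execution_changes = 16; // E::max_bls_to_execution_changes() on minimal
let num_pre_capella = validator_count / 4; // 32 changes submitted before the fork
assert_eq!(num_pre_capella, 2 * max_bls_to_execution_changes); // two full blocks

// The oversized post-fork request supplies the remaining 96 valid changes,
// filling the other 128 / 16 - 2 = 6 full blocks the loop checks.
```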
diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
index f9d942fb9..90a53b267 100644
--- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
@@ -1231,7 +1231,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "error" => ?e
                 );
                 // We ignore pre-capella messages without penalizing peers.
-                if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) {
+                if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) {
                     self.propagate_validation_result(
                         message_id,
                         peer_id,
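For context, the changed `matches!` sits inside the gossip worker's error handling. A sketch of how the surrounding branch plausibly reads (only the `matches!` line is changed by this diff; the reject path is an assumption based on the worker's usual pattern):

```rust
// Pre-Capella changes are IGNOREd without penalty: an honest peer's clock may
// simply be slightly ahead of ours. Other errors indicate an invalid message.
if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) {
    self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
} else {
    self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
    // The real handler additionally applies a peer penalty on this path.
}
```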
diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs
new file mode 100644
index 000000000..84513d466
--- /dev/null
+++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs
@@ -0,0 +1,105 @@
+use state_processing::SigVerifiedOp;
+use std::collections::{hash_map::Entry, HashMap};
+use std::sync::Arc;
+use types::{
+    AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock,
+    SignedBlsToExecutionChange,
+};
+
+/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator.
+///
+/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork,
+/// and is less relevant after that.
+#[derive(Debug, Default)]
+pub struct BlsToExecutionChanges<T: EthSpec> {
+    /// Map from validator index to BLS to execution change.
+    by_validator_index: HashMap<u64, Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
+    /// Last-in-first-out (LIFO) queue of verified messages.
+    queue: Vec<Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
+}
+
+impl<T: EthSpec> BlsToExecutionChanges<T> {
+    pub fn existing_change_equals(
+        &self,
+        address_change: &SignedBlsToExecutionChange,
+    ) -> Option<bool> {
+        self.by_validator_index
+            .get(&address_change.message.validator_index)
+            .map(|existing| existing.as_inner() == address_change)
+    }
+
+    pub fn insert(
+        &mut self,
+        verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
+    ) -> bool {
+        // Wrap in an `Arc` once on insert.
+        let verified_change = Arc::new(verified_change);
+        match self
+            .by_validator_index
+            .entry(verified_change.as_inner().message.validator_index)
+        {
+            Entry::Vacant(entry) => {
+                self.queue.push(verified_change.clone());
+                entry.insert(verified_change);
+                true
+            }
+            Entry::Occupied(_) => false,
+        }
+    }
+
+    /// FIFO ordering, used for persistence to disk.
+    pub fn iter_fifo(
+        &self,
+    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
+        self.queue.iter()
+    }
+
+    /// LIFO ordering, used for block packing.
+    pub fn iter_lifo(
+        &self,
+    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
+        self.queue.iter().rev()
+    }
+
+    /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
+    ///
+    /// The block check is necessary to avoid pruning too eagerly and losing the ability to include
+    /// address changes during re-orgs. This isn't *perfect*, so some address changes could
+    /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished
+    /// due to the gossip duplicate rules.
+    pub fn prune<Payload: AbstractExecPayload<T>>(
+        &mut self,
+        head_block: &SignedBeaconBlock<T, Payload>,
+        head_state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) {
+        let mut validator_indices_pruned = vec![];
+
+        self.queue.retain(|address_change| {
+            let validator_index = address_change.as_inner().message.validator_index;
+            head_state
+                .validators()
+                .get(validator_index as usize)
+                .map_or(true, |validator| {
+                    let prune = validator.has_eth1_withdrawal_credential(spec)
+                        && head_block
+                            .message()
+                            .body()
+                            .bls_to_execution_changes()
+                            .map_or(true, |recent_changes| {
+                                !recent_changes
+                                    .iter()
+                                    .any(|c| c.message.validator_index == validator_index)
+                            });
+                    if prune {
+                        validator_indices_pruned.push(validator_index);
+                    }
+                    !prune
+                })
+        });
+
+        for validator_index in validator_indices_pruned {
+            self.by_validator_index.remove(&validator_index);
+        }
+    }
+}
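The ordering contract of the new type, in one place. A sketch for some concrete `T: EthSpec`, assuming a hypothetical helper `verified(i)` that builds a `SigVerifiedOp<SignedBlsToExecutionChange, T>` for validator `i`:

```rust
let mut pool = BlsToExecutionChanges::<T>::default();
assert!(pool.insert(verified(1)));
assert!(pool.insert(verified(2)));
// A second change for validator 2 is refused; the first insertion wins.
assert!(!pool.insert(verified(2)));

let lifo: Vec<u64> = pool
    .iter_lifo()
    .map(|op| op.as_inner().message.validator_index)
    .collect();
let fifo: Vec<u64> = pool
    .iter_fifo()
    .map(|op| op.as_inner().message.validator_index)
    .collect();
assert_eq!(lifo, vec![2, 1]); // block packing sees the newest change first
assert_eq!(fifo, vec![1, 2]); // persistence keeps insertion order
```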
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index 70e0d56bc..4643addad 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -2,6 +2,7 @@ mod attestation;
 mod attestation_id;
 mod attestation_storage;
 mod attester_slashing;
+mod bls_to_execution_changes;
 mod max_cover;
 mod metrics;
 mod persistence;
@@ -18,6 +19,7 @@ pub use persistence::{
 pub use reward_cache::RewardCache;
 
 use crate::attestation_storage::{AttestationMap, CheckpointKey};
+use crate::bls_to_execution_changes::BlsToExecutionChanges;
 use crate::sync_aggregate_id::SyncAggregateId;
 use attester_slashing::AttesterSlashingMaxCover;
 use max_cover::maximum_cover;
@@ -51,8 +53,8 @@ pub struct OperationPool<T: EthSpec + Default> {
     proposer_slashings: RwLock<HashMap<u64, SigVerifiedOp<ProposerSlashing, T>>>,
     /// Map from exiting validator to their exit data.
     voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, T>>>,
-    /// Map from credential changing validator to their execution change data.
-    bls_to_execution_changes: RwLock<HashMap<u64, SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
+    /// Map from credential changing validator to their position in the queue.
+    bls_to_execution_changes: RwLock<BlsToExecutionChanges<T>>,
     /// Reward cache for accelerating attestation packing.
     reward_cache: RwLock<RewardCache>,
     _phantom: PhantomData<T>,
@@ -513,15 +515,28 @@ impl<T: EthSpec> OperationPool<T> {
         );
     }
 
-    /// Insert a BLS to execution change into the pool.
+    /// Check if an address change equal to `address_change` is already in the pool.
+    ///
+    /// Return `None` if no address change for the validator index exists in the pool.
+    pub fn bls_to_execution_change_in_pool_equals(
+        &self,
+        address_change: &SignedBlsToExecutionChange,
+    ) -> Option<bool> {
+        self.bls_to_execution_changes
+            .read()
+            .existing_change_equals(address_change)
+    }
+
+    /// Insert a BLS to execution change into the pool, *only if* no prior change is known.
+    ///
+    /// Return `true` if the change was inserted.
     pub fn insert_bls_to_execution_change(
         &self,
         verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
-    ) {
-        self.bls_to_execution_changes.write().insert(
-            verified_change.as_inner().message.validator_index,
-            verified_change,
-        );
+    ) -> bool {
+        self.bls_to_execution_changes
+            .write()
+            .insert(verified_change)
     }
 
     /// Get a list of execution changes for inclusion in a block.
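The new query method is deliberately tri-state, and `verify_bls_to_execution_change_for_http_api` (earlier in this diff) keys off all three cases. A restatement of the contract, not new logic:

```rust
match op_pool.bls_to_execution_change_in_pool_equals(&address_change) {
    // Nothing pooled for this validator: proceed to signature verification.
    None => { /* verify_and_observe(...) */ }
    // The identical change is already pooled: report `AlreadyKnown`.
    Some(true) => { /* Ok(ObservationOutcome::AlreadyKnown) */ }
    // A *different* change is pooled: the HTTP API rejects it, while gossip
    // downgrades the conflict to an IGNORE.
    Some(false) => { /* Err(Error::BlsToExecutionConflictsWithPool) */ }
}
```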
@@ -533,7 +548,7 @@ impl<T: EthSpec> OperationPool<T> {
         spec: &ChainSpec,
     ) -> Vec<SignedBlsToExecutionChange> {
         filter_limit_operations(
-            self.bls_to_execution_changes.read().values(),
+            self.bls_to_execution_changes.read().iter_lifo(),
             |address_change| {
                 address_change.signature_is_still_valid(&state.fork())
                     && state
@@ -548,33 +563,15 @@ impl<T: EthSpec> OperationPool<T> {
     }
 
     /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
-    ///
-    /// The block check is necessary to avoid pruning too eagerly and losing the ability to include
-    /// address changes during re-orgs. This is isn't *perfect* so some address changes could
-    /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished
-    /// due to the gossip duplicate rules.
     pub fn prune_bls_to_execution_changes<Payload: AbstractExecPayload<T>>(
         &self,
         head_block: &SignedBeaconBlock<T, Payload>,
         head_state: &BeaconState<T>,
         spec: &ChainSpec,
     ) {
-        prune_validator_hash_map(
-            &mut self.bls_to_execution_changes.write(),
-            |validator_index, validator| {
-                validator.has_eth1_withdrawal_credential(spec)
-                    && head_block
-                        .message()
-                        .body()
-                        .bls_to_execution_changes()
-                        .map_or(true, |recent_changes| {
-                            !recent_changes
-                                .iter()
-                                .any(|c| c.message.validator_index == validator_index)
-                        })
-            },
-            head_state,
-        );
+        self.bls_to_execution_changes
+            .write()
+            .prune(head_block, head_state, spec)
     }
 
     /// Prune all types of transactions given the latest head state and head fork.
@@ -663,8 +660,8 @@ impl<T: EthSpec> OperationPool<T> {
     pub fn get_all_bls_to_execution_changes(&self) -> Vec<SignedBlsToExecutionChange> {
         self.bls_to_execution_changes
             .read()
-            .iter()
-            .map(|(_, address_change)| address_change.as_inner().clone())
+            .iter_fifo()
+            .map(|address_change| address_change.as_inner().clone())
             .collect()
     }
 }
diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs
index 043e6fb7f..4948040ae 100644
--- a/beacon_node/operation_pool/src/persistence.rs
+++ b/beacon_node/operation_pool/src/persistence.rs
@@ -1,5 +1,6 @@
 use crate::attestation_id::AttestationId;
 use crate::attestation_storage::AttestationMap;
+use crate::bls_to_execution_changes::BlsToExecutionChanges;
 use crate::sync_aggregate_id::SyncAggregateId;
 use crate::OpPoolError;
 use crate::OperationPool;
@@ -105,8 +106,8 @@ impl<T: EthSpec> PersistedOperationPool<T> {
         let bls_to_execution_changes = operation_pool
             .bls_to_execution_changes
             .read()
-            .iter()
-            .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone())
+            .iter_fifo()
+            .map(|bls_to_execution_change| (**bls_to_execution_change).clone())
             .collect();
 
         PersistedOperationPool::V14(PersistedOperationPoolV14 {
@@ -153,18 +154,13 @@ impl<T: EthSpec> PersistedOperationPool<T> {
             PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => {
                 return Err(OpPoolError::IncorrectOpPoolVariant)
             }
-            PersistedOperationPool::V14(pool) => RwLock::new(
-                pool.bls_to_execution_changes
-                    .iter()
-                    .cloned()
-                    .map(|bls_to_execution_change| {
-                        (
-                            bls_to_execution_change.as_inner().message.validator_index,
-                            bls_to_execution_change,
-                        )
-                    })
-                    .collect(),
-            ),
+            PersistedOperationPool::V14(pool) => {
+                let mut bls_to_execution_changes = BlsToExecutionChanges::default();
+                for bls_to_execution_change in pool.bls_to_execution_changes {
+                    bls_to_execution_changes.insert(bls_to_execution_change);
+                }
+                RwLock::new(bls_to_execution_changes)
+            }
         };
         let op_pool = OperationPool {
             attestations,
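Persisting via `iter_fifo` and restoring via repeated `insert` replays the insertions in their original order, so the LIFO packing order survives a restart. A sketch of the round-trip with SSZ (de)serialization elided:

```rust
// Persist: unwrap the `Arc`s in FIFO (insertion) order.
let persisted: Vec<SigVerifiedOp<SignedBlsToExecutionChange, T>> =
    pool.iter_fifo().map(|op| (**op).clone()).collect();

// Restore: re-inserting in FIFO order rebuilds both the queue and the
// by-validator index. Any duplicate validator index (there should be none in
// persisted data) would simply be dropped by `insert`.
let mut restored = BlsToExecutionChanges::default();
for op in persisted {
    restored.insert(op);
}
```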
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
index 257819fd8..e9fb8109d 100644
--- a/common/eth2/src/lib.rs
+++ b/common/eth2/src/lib.rs
@@ -1042,6 +1042,24 @@ impl BeaconNodeHttpClient {
         Ok(())
     }
 
+    /// `POST beacon/pool/bls_to_execution_changes`
+    pub async fn post_beacon_pool_bls_to_execution_changes(
+        &self,
+        address_changes: &[SignedBlsToExecutionChange],
+    ) -> Result<(), Error> {
+        let mut path = self.eth_path(V1)?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("pool")
+            .push("bls_to_execution_changes");
+
+        self.post(path, &address_changes).await?;
+
+        Ok(())
+    }
+
     /// `GET beacon/deposit_snapshot`
     pub async fn get_deposit_snapshot(&self) -> Result<Option<types::DepositTreeSnapshot>, Error> {
         use ssz::Decode;
diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs
index efd356462..50ac2ff3d 100644
--- a/consensus/state_processing/src/verify_operation.rs
+++ b/consensus/state_processing/src/verify_operation.rs
@@ -67,7 +67,7 @@ where
     fn new(op: T, state: &BeaconState<E>) -> Self {
         let verified_against = VerifiedAgainst {
             fork_versions: op
-                .verification_epochs(state.current_epoch())
+                .verification_epochs()
                 .into_iter()
                 .map(|epoch| state.fork().get_fork_version(epoch))
                 .collect(),
@@ -89,13 +89,9 @@ where
     }
 
     pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool {
-        // Pass the fork's epoch as the effective current epoch. If the message is a current-epoch
-        // style message like `SignedBlsToExecutionChange` then `get_fork_version` will return the
-        // current fork version and we'll check it matches the fork version the message was checked
-        // against.
-        let effective_current_epoch = current_fork.epoch;
+        // The `.all()` will return `true` if the iterator is empty.
         self.as_inner()
-            .verification_epochs(effective_current_epoch)
+            .verification_epochs()
             .into_iter()
             .zip(self.verified_against.fork_versions.iter())
             .all(|(epoch, verified_fork_version)| {
@@ -126,12 +122,8 @@ pub trait VerifyOperation<E: EthSpec>: Encode + Decode + Sized {
     ///
     /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type.
     ///
-    /// If the message contains no inherent epoch it should return the `current_epoch` that is
-    /// passed in, as that's the epoch at which it was verified.
-    fn verification_epochs(
-        &self,
-        current_epoch: Epoch,
-    ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>;
+    /// If the message is valid across all forks it should return an empty smallvec.
+    fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>;
 }
 
 impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit {
@@ -147,7 +139,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit {
     }
 
     #[allow(clippy::integer_arithmetic)]
-    fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
+    fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         smallvec![self.message.epoch]
     }
 }
@@ -165,7 +157,7 @@ impl<E: EthSpec> VerifyOperation<E> for AttesterSlashing<E> {
     }
 
     #[allow(clippy::integer_arithmetic)]
-    fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
+    fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         smallvec![
             self.attestation_1.data.target.epoch,
            self.attestation_2.data.target.epoch
        ]
@@ -186,7 +178,7 @@ impl<E: EthSpec> VerifyOperation<E> for ProposerSlashing {
     }
 
     #[allow(clippy::integer_arithmetic)]
-    fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
+    fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         // Only need a single epoch because the slots of the two headers must be equal.
         smallvec![self
             .signed_header_1
             .message
             .slot
             .epoch(E::slots_per_epoch())]
     }
 }
@@ -209,10 +201,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedBlsToExecutionChange {
     }
 
     #[allow(clippy::integer_arithmetic)]
-    fn verification_epochs(
-        &self,
-        current_epoch: Epoch,
-    ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
-        smallvec![current_epoch]
+    fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
+        smallvec![]
     }
 }
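The safety of returning an empty smallvec rests on `Iterator::all` being vacuously true, so a `SigVerifiedOp` with no verification epochs never expires. That is exactly right for a message signed against the immutable genesis fork version. In plain Rust:

```rust
// `.all()` on an empty iterator returns `true` without invoking the closure,
// so zero verification epochs means "signature valid across all forks".
let no_epochs: [u64; 0] = [];
assert!(no_epochs.iter().all(|_| false));
```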
diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs
index f6064f65a..cb73e43f9 100644
--- a/consensus/types/src/bls_to_execution_change.rs
+++ b/consensus/types/src/bls_to_execution_change.rs
@@ -28,6 +28,26 @@ pub struct BlsToExecutionChange {
 
 impl SignedRoot for BlsToExecutionChange {}
 
+impl BlsToExecutionChange {
+    pub fn sign(
+        self,
+        secret_key: &SecretKey,
+        genesis_validators_root: Hash256,
+        spec: &ChainSpec,
+    ) -> SignedBlsToExecutionChange {
+        let domain = spec.compute_domain(
+            Domain::BlsToExecutionChange,
+            spec.genesis_fork_version,
+            genesis_validators_root,
+        );
+        let message = self.signing_root(domain);
+        SignedBlsToExecutionChange {
+            message: self,
+            signature: secret_key.sign(message),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
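Verification mirrors `sign`: the domain is always derived from `genesis_fork_version`, never from the active fork, which is why these messages remain valid across fork boundaries. A sketch of the corresponding check (the canonical version lives in `state_processing`; this helper is illustrative only):

```rust
fn bls_to_execution_change_signature_is_valid(
    signed: &SignedBlsToExecutionChange,
    genesis_validators_root: Hash256,
    spec: &ChainSpec,
) -> bool {
    // Same domain computation as `sign` above: genesis fork version, always.
    let domain = spec.compute_domain(
        Domain::BlsToExecutionChange,
        spec.genesis_fork_version,
        genesis_validators_root,
    );
    let message = signed.message.signing_root(domain);
    signed
        .message
        .from_bls_pubkey
        .decompress()
        .map_or(false, |pubkey| signed.signature.verify(&pubkey, message))
}
```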