From 4d9e137e6ad4e738a976164d393f2e0b4896a00f Mon Sep 17 00:00:00 2001 From: Madman600 <38760981+Madman600@users.noreply.github.com> Date: Mon, 16 Jan 2023 03:42:08 +0000 Subject: [PATCH 01/25] Update checkpoint-sync.md (#3831) Remove infura checkpoint sync instructions. Co-authored-by: Adam Patacchiola --- book/src/checkpoint-sync.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 736aa08f1..893c545cb 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -48,17 +48,6 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` -### Use Infura as a remote beacon node provider - -You can use Infura as the remote beacon node provider to load the initial checkpoint state. - -1. Sign up for the free Infura ETH2 API using the `Create new project tab` on the [Infura dashboard](https://infura.io/dashboard). -2. Copy the HTTPS endpoint for the required network (Mainnet/Prater). -3. Use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` -lighthouse bn --checkpoint-sync-url https://:@eth2-beacon-mainnet.infura.io ... -``` - ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks From 912ea2a5cab0ba8c4a9ac9fd4f3717e50a332301 Mon Sep 17 00:00:00 2001 From: Santiago Medina Date: Mon, 16 Jan 2023 03:42:09 +0000 Subject: [PATCH 02/25] Return HTTP 404 rather than 405 (#3836) ## Issue Addressed Issue #3112 ## Proposed Changes Add `Filter::recover` to the GET chain to handle rejections specifically as 404 NOT FOUND ## Additional Info Making a request to `http://localhost:5052/not_real` now returns the following: ``` { "code": 404, "message": "NOT_FOUND", "stacktraces": [] } ``` Co-authored-by: Paul Hauner --- beacon_node/http_api/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6cfdaf5db..8cd0b856b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3383,7 +3383,8 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) - .or(get_events.boxed()), + .or(get_events.boxed()) + .recover(warp_utils::reject::handle_rejection), ) .boxed() .or(warp::post().and( @@ -3407,7 +3408,8 @@ pub fn serve( .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) .or(post_lighthouse_block_rewards.boxed()) - .or(post_lighthouse_ui_validator_metrics.boxed()), + .or(post_lighthouse_ui_validator_metrics.boxed()) + .recover(warp_utils::reject::handle_rejection), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) From 6ac1c5b43951f26f18df8e0b7553fa93c30e0250 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 16 Jan 2023 03:42:10 +0000 Subject: [PATCH 03/25] Add CLI flag to specify the format of logs written to the logfile (#3839) ## Proposed Changes Decouple the stdout and logfile formats by adding the `--logfile-format` CLI flag. This behaves identically to the existing `--log-format` flag, but instead will only affect the logs written to the logfile. The `--log-format` flag will no longer have any effect on the contents of the logfile. 
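For example, an operator who wants JSON logs written to the logfile while keeping the default human-readable output on stdout could run something like the following (illustrative invocation only; other flags omitted):
```
lighthouse bn --logfile-format JSON ...
```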
## Additional Info This avoids being a breaking change by causing `logfile-format` to default to the value of `--log-format` if it is not provided. This means that users who were previously relying on being able to use a JSON formatted logfile will be able to continue to use `--log-format JSON`. Users who want to use JSON on stdout and default logs in the logfile, will need to pass the following flags: `--log-format JSON --logfile-format DEFAULT` --- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 4 +++- lighthouse/src/main.rs | 15 +++++++++++++++ lighthouse/tests/beacon_node.rs | 19 ++++++++++++++++++- testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 7 files changed, 40 insertions(+), 2 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35..137a4534b 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -792,6 +792,7 @@ fn run( debug_level: String::from("trace"), logfile_debug_level: String::from("trace"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fad7edeb1..8ef67e82d 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -50,6 +50,7 @@ pub struct LoggerConfig { pub debug_level: String, pub logfile_debug_level: String, pub log_format: Option, + pub logfile_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, @@ -64,6 +65,7 @@ impl Default for LoggerConfig { debug_level: String::from("info"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 200, @@ -252,7 +254,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format.as_deref() { + .format(match config.logfile_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index da72204f9..64ee0432f 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -99,6 +99,15 @@ fn main() { .default_value("debug") .global(true), ) + .arg( + Arg::with_name("logfile-format") + .long("logfile-format") + .value_name("FORMAT") + .help("Specifies the log format used when emitting logs to the logfile.") + .possible_values(&["DEFAULT", "JSON"]) + .takes_value(true) + .global(true) + ) .arg( Arg::with_name("logfile-max-size") .long("logfile-max-size") @@ -402,6 +411,11 @@ fn run( .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; + let logfile_format = matches + .value_of("logfile-format") + // Ensure that `logfile-format` defaults to the value of `log-format`. + .or_else(|| matches.value_of("log-format")); + let logfile_max_size: u64 = matches .value_of("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? 
@@ -452,6 +466,7 @@ fn run( debug_level: String::from(debug_level), logfile_debug_level: String::from(logfile_debug_level), log_format: log_format.map(String::from), + logfile_format: logfile_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4a2e160e8..7e581ee61 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1662,7 +1662,24 @@ fn logfile_no_restricted_perms_flag() { assert!(config.logger_config.is_restricted == false); }); } - +#[test] +fn logfile_format_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.logger_config.logfile_format, None)); +} +#[test] +fn logfile_format_flag() { + CommandLineTest::new() + .flag("logfile-format", Some("JSON")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.logger_config.logfile_format, + Some("JSON".to_string()) + ) + }); +} #[test] fn sync_eth1_chain_default() { CommandLineTest::new() diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 8284bff60..42aefea7a 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -62,6 +62,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 53c4447da..1a026ded4 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -47,6 +47,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 1c8b41f05..9d759715e 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: String::from(log_level), logfile_debug_level: String::from("debug"), log_format: log_format.map(String::from), + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, From 9a970ce3a2ff5ab64d19e48aac984ef12db1078c Mon Sep 17 00:00:00 2001 From: David Theodore Date: Tue, 17 Jan 2023 05:13:47 +0000 Subject: [PATCH 04/25] add better err reporting UnableToOpenVotingKeystore (#3781) ## Issue Addressed #3780 ## Proposed Changes Add error reporting that notifies the node operator that the `voting_keystore_path` in their `validator_definitions.yml` file may be incorrect. 
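For reference, the field being flagged lives in each entry of `validator_definitions.yml`. A minimal, purely illustrative entry (the public key and paths below are placeholders) looks roughly like:
```
- enabled: true
  voting_public_key: "0x87a5..."
  type: local_keystore
  voting_keystore_path: /home/user/.lighthouse/mainnet/validators/0x87a5.../voting-keystore.json
  voting_keystore_password_path: /home/user/.lighthouse/mainnet/secrets/0x87a5...
```
If the data directory has been moved, it is the `voting_keystore_path` value above that needs updating.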
## Additional Info There is more info in issue #3780 Co-authored-by: Paul Hauner --- validator_client/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 4db980405..00c3db7aa 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -31,6 +31,7 @@ use crate::beacon_node_fallback::{ }; use crate::doppelganger_service::DoppelgangerService; use crate::graffiti_file::GraffitiFile; +use crate::initialized_validators::Error::UnableToOpenVotingKeystore; use account_utils::validator_definitions::ValidatorDefinitions; use attestation_service::{AttestationService, AttestationServiceBuilder}; use block_service::{BlockService, BlockServiceBuilder}; @@ -184,7 +185,16 @@ impl ProductionValidatorClient { log.clone(), ) .await - .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; + .map_err(|e| { + match e { + UnableToOpenVotingKeystore(err) => { + format!("Unable to initialize validators: {:?}. If you have recently moved the location of your data directory \ + make sure to update the location of voting_keystore_path in your validator_definitions.yml", err) + }, + err => { + format!("Unable to initialize validators: {:?}", err)} + } + })?; let voting_pubkeys: Vec<_> = validators.iter_voting_pubkeys().collect(); From b4d9fc03ee54ecb8b916453189fbf422eb943285 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 17 Jan 2023 05:13:48 +0000 Subject: [PATCH 05/25] add logging for starting request and receiving block (#3858) ## Issue Addressed #3853 ## Proposed Changes Added `INFO` level logs for requesting and receiving the unsigned block. ## Additional Info Logging for successfully publishing the signed block is already there. And seemingly there is a log for when "We realize we are going to produce a block" in the `start_update_service`: `info!(log, "Block production service started"); `. Is there anywhere else you'd like to see logging around this event? Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> --- validator_client/src/block_service.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d4acbe756..bef51a694 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -333,6 +333,11 @@ impl BlockService { let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + info!( + log, + "Requesting unsigned block"; + "slot" => slot.as_u64(), + ); // Request block from first responsive beacon node. let block = self .beacon_nodes @@ -383,6 +388,11 @@ impl BlockService { } }; + info!( + log, + "Received unsigned block"; + "slot" => slot.as_u64(), + ); if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged" @@ -401,6 +411,11 @@ impl BlockService { .await .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + info!( + log, + "Publishing signed block"; + "slot" => slot.as_u64(), + ); // Publish block with first available beacon node. self.beacon_nodes .first_success( From 480309fb96a3939229460e8b812c755d3e5e0a77 Mon Sep 17 00:00:00 2001 From: aliask Date: Tue, 17 Jan 2023 05:13:49 +0000 Subject: [PATCH 06/25] Fix some dead links in markdown files (#3885) ## Issue Addressed No issue has been raised for these broken links. 
## Proposed Changes Update links with the new URLs for the same document. ## Additional Info ~The link for the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list is also broken, but I can't find the correct link.~ Co-authored-by: Paul Hauner --- README.md | 2 +- book/src/merge-migration.md | 4 ++-- book/src/run_a_node.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 859d5c4c6..3565882d6 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ of the Lighthouse book. The best place for discussion is the [Lighthouse Discord server](https://discord.gg/cyAszAh). -Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb/) mailing list for email +Sign up to the [Lighthouse Development Updates](https://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. Encrypt sensitive messages using our [PGP diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 08f1b51e4..ec9aeaaee 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -58,7 +58,7 @@ supported. Each execution engine has its own flags for configuring the engine API and JWT. Please consult the relevant page for your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) @@ -203,5 +203,5 @@ guidance for specific setups. - [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) -- [EthDocker: Merge Preparation](https://eth-docker.net/docs/About/MergePrep/) +- [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) - [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index 5ce42aa63..fb112c367 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -26,7 +26,7 @@ has authority to control the execution engine. Each execution engine has its own flags for configuring the engine API and JWT. 
Please consult the relevant page of your execution engine for the required flags: -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) - [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) From 26787412cd5e5447f00123b4e4afe5d779765b0f Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Thu, 19 Jan 2023 05:42:17 -0600 Subject: [PATCH 07/25] Update engine_api to Latest spec (#3893) * Update engine_api to Latest spec * Small Test Fix * Fix Test Deserialization Issue --- .../tests/payload_invalidation.rs | 4 +- beacon_node/execution_layer/src/engine_api.rs | 73 +-- .../execution_layer/src/engine_api/http.rs | 78 ++-- .../src/engine_api/json_structures.rs | 429 +++++++++--------- .../test_utils/execution_block_generator.rs | 4 +- .../src/test_utils/handle_rpc.rs | 87 ++-- 6 files changed, 365 insertions(+), 310 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2d8427e30..54d773447 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1007,9 +1007,7 @@ async fn payload_preparation() { .unwrap(), fee_recipient, None, - ) - .downgrade_to_v1() - .unwrap(); + ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4970361a5..afc5cffe2 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -12,6 +12,7 @@ pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, }; +use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge}; pub mod auth; pub mod http; @@ -267,7 +268,7 @@ pub struct PayloadAttributes { #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, #[superstruct(only(V2))] - pub withdrawals: Option>, + pub withdrawals: Vec, } impl PayloadAttributes { @@ -277,31 +278,18 @@ impl PayloadAttributes { suggested_fee_recipient: Address, withdrawals: Option>, ) -> Self { - // this should always return the highest version - PayloadAttributes::V2(PayloadAttributesV2 { - timestamp, - prev_randao, - suggested_fee_recipient, - withdrawals, - }) - } - - pub fn downgrade_to_v1(self) -> Result { - match self { - PayloadAttributes::V1(_) => Ok(self), - PayloadAttributes::V2(v2) => { - if v2.withdrawals.is_some() { - return Err(Error::BadConversion( - "Downgrading from PayloadAttributesV2 with non-null withdrawals" - .to_string(), - )); - } - Ok(PayloadAttributes::V1(PayloadAttributesV1 { - timestamp: v2.timestamp, - prev_randao: v2.prev_randao, - suggested_fee_recipient: v2.suggested_fee_recipient, - })) - } + match withdrawals { + Some(withdrawals) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + None => 
PayloadAttributes::V1(PayloadAttributesV1 { + timestamp, + prev_randao, + suggested_fee_recipient, + }), } } } @@ -326,6 +314,39 @@ pub struct ProposeBlindedBlockResponse { pub validation_error: Option, } +#[superstruct( + variants(Merge, Capella, Eip4844), + variant_attributes(derive(Clone, Debug, PartialEq),), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] +#[derive(Clone, Debug, PartialEq)] +pub struct GetPayloadResponse { + #[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))] + pub execution_payload: ExecutionPayloadMerge, + #[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))] + pub execution_payload: ExecutionPayloadCapella, + #[superstruct(only(Eip4844), partial_getter(rename = "execution_payload_eip4844"))] + pub execution_payload: ExecutionPayloadEip4844, + pub block_value: Uint256, +} + +impl GetPayloadResponse { + pub fn execution_payload(self) -> ExecutionPayload { + match self { + GetPayloadResponse::Merge(response) => { + ExecutionPayload::Merge(response.execution_payload) + } + GetPayloadResponse::Capella(response) => { + ExecutionPayload::Capella(response.execution_payload) + } + GetPayloadResponse::Eip4844(response) => { + ExecutionPayload::Eip4844(response.execution_payload) + } + } + } +} + // This name is work in progress, it could // change when this method is actually proposed // but I'm writing this as it has been described diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8ad3066f7..60725192b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -735,7 +735,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV1::try_from(execution_payload)?]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -752,7 +752,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]); + let params = json!([JsonExecutionPayload::from(execution_payload)]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -767,7 +767,6 @@ impl HttpJsonRpc { pub async fn get_payload_v1( &self, - fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); @@ -780,25 +779,41 @@ impl HttpJsonRpc { ) .await?; - JsonExecutionPayload::V1(payload_v1).try_into_execution_payload(fork_name) + Ok(JsonExecutionPayload::V1(payload_v1).into()) } pub async fn get_payload_v2( &self, fork_name: ForkName, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); - let response: JsonGetPayloadResponse = self - .rpc_request( - ENGINE_GET_PAYLOAD_V2, - params, - ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - - JsonExecutionPayload::V2(response.execution_payload).try_into_execution_payload(fork_name) + match fork_name { + ForkName::Merge => { + let response: JsonGetPayloadResponseV1 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V1(response).into()) + } + ForkName::Capella => { + let 
response: JsonGetPayloadResponseV2 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V2, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + Ok(JsonGetPayloadResponse::V2(response).into()) + } + ForkName::Base | ForkName::Altair | ForkName::Eip4844 => Err( + Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), + ), + } } pub async fn get_blobs_bundle_v1( @@ -935,9 +950,15 @@ impl HttpJsonRpc { ) -> Result, Error> { let supported_apis = self.get_cached_supported_apis().await?; if supported_apis.get_payload_v2 { - self.get_payload_v2(fork_name, payload_id).await + // TODO: modify this method to return GetPayloadResponse instead + // of throwing away the `block_value` and returning only the + // ExecutionPayload + Ok(self + .get_payload_v2(fork_name, payload_id) + .await? + .execution_payload()) } else if supported_apis.new_payload_v1 { - self.get_payload_v1(fork_name, payload_id).await + self.get_payload_v1(payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayload")) } @@ -955,13 +976,8 @@ impl HttpJsonRpc { self.forkchoice_updated_v2(forkchoice_state, payload_attributes) .await } else if supported_apis.forkchoice_updated_v1 { - self.forkchoice_updated_v1( - forkchoice_state, - payload_attributes - .map(|pa| pa.downgrade_to_v1()) - .transpose()?, - ) - .await + self.forkchoice_updated_v1(forkchoice_state, payload_attributes) + .await } else { Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")) } @@ -976,9 +992,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{ - ExecutionPayloadMerge, ForkName, MainnetEthSpec, Transactions, Unsigned, VariableList, - }; + use types::{ExecutionPayloadMerge, MainnetEthSpec, Transactions, Unsigned, VariableList}; struct Tester { server: MockServer, @@ -1318,9 +1332,7 @@ mod test { Tester::new(true) .assert_request_equals( |client| async move { - let _ = client - .get_payload_v1::(ForkName::Merge, [42; 8]) - .await; + let _ = client.get_payload_v1::([42; 8]).await; }, json!({ "id": STATIC_ID, @@ -1333,9 +1345,7 @@ mod test { Tester::new(false) .assert_auth_failure(|client| async move { - client - .get_payload_v1::(ForkName::Merge, [42; 8]) - .await + client.get_payload_v1::([42; 8]).await }) .await; } @@ -1564,7 +1574,7 @@ mod test { // engine_getPayloadV1 REQUEST validation |client| async move { let _ = client - .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await; }, json!({ @@ -1599,7 +1609,7 @@ mod test { })], |client| async move { let payload = client - .get_payload_v1::(ForkName::Merge,str_to_payload_id("0xa247243752eb10b4")) + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 78a3cb475..ace15ebd8 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -64,7 +64,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2), + variants(V1, V2, V3), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "T: EthSpec", rename_all = "camelCase"), @@ -94,235 +94,234 @@ pub struct JsonExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, - 
#[superstruct(only(V2))] - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - #[serde(with = "eth2_serde_utils::u256_hex_be_opt")] - pub excess_data_gas: Option, + #[superstruct(only(V3))] + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub excess_data_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - #[superstruct(only(V2))] - pub withdrawals: Option>, + #[superstruct(only(V2, V3))] + pub withdrawals: VariableList, } -impl JsonExecutionPayload { - pub fn try_into_execution_payload( - self, - fork_name: ForkName, - ) -> Result, Error> { - match self { - JsonExecutionPayload::V1(v1) => match fork_name { - ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: v1.parent_hash, - fee_recipient: v1.fee_recipient, - state_root: v1.state_root, - receipts_root: v1.receipts_root, - logs_bloom: v1.logs_bloom, - prev_randao: v1.prev_randao, - block_number: v1.block_number, - gas_limit: v1.gas_limit, - gas_used: v1.gas_used, - timestamp: v1.timestamp, - extra_data: v1.extra_data, - base_fee_per_gas: v1.base_fee_per_gas, - block_hash: v1.block_hash, - transactions: v1.transactions, - })), - _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV1 for {}", fork_name))), - } - JsonExecutionPayload::V2(v2) => match fork_name { - ForkName::Merge => Ok(ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - })), - ForkName::Capella => Ok(ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - block_hash: v2.block_hash, - transactions: v2.transactions, - withdrawals: v2 - .withdrawals - .map(|v| { - Into::>::into(v) - .into_iter() - .map(Into::into) - .collect::>() - .into() - }) - .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadCapella".to_string()))? 
- })), - ForkName::Eip4844 => Ok(ExecutionPayload::Eip4844(ExecutionPayloadEip4844 { - parent_hash: v2.parent_hash, - fee_recipient: v2.fee_recipient, - state_root: v2.state_root, - receipts_root: v2.receipts_root, - logs_bloom: v2.logs_bloom, - prev_randao: v2.prev_randao, - block_number: v2.block_number, - gas_limit: v2.gas_limit, - gas_used: v2.gas_used, - timestamp: v2.timestamp, - extra_data: v2.extra_data, - base_fee_per_gas: v2.base_fee_per_gas, - excess_data_gas: v2.excess_data_gas.ok_or_else(|| Error::BadConversion("Null `excess_data_gas` field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))?, - block_hash: v2.block_hash, - transactions: v2.transactions, - withdrawals: v2 - .withdrawals - .map(|v| { - Into::>::into(v) - .into_iter() - .map(Into::into) - .collect::>() - .into() - }) - .ok_or_else(|| Error::BadConversion("Null withdrawal field converting JsonExecutionPayloadV2 -> ExecutionPayloadEip4844".to_string()))? - })), - _ => Err(Error::UnsupportedForkVariant(format!("Unsupported conversion from JsonExecutionPayloadV2 for {}", fork_name))), - } +impl From> for JsonExecutionPayloadV1 { + fn from(payload: ExecutionPayloadMerge) -> Self { + JsonExecutionPayloadV1 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for JsonExecutionPayloadV2 { + fn from(payload: ExecutionPayloadCapella) -> Self { + JsonExecutionPayloadV2 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for JsonExecutionPayloadV3 { + fn from(payload: ExecutionPayloadEip4844) -> Self { + JsonExecutionPayloadV3 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), } } } -impl TryFrom> for JsonExecutionPayloadV1 { - type Error = Error; - fn try_from(payload: ExecutionPayload) -> Result { - match payload { - ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV1 { - parent_hash: merge.parent_hash, - fee_recipient: merge.fee_recipient, - 
state_root: merge.state_root, - receipts_root: merge.receipts_root, - logs_bloom: merge.logs_bloom, - prev_randao: merge.prev_randao, - block_number: merge.block_number, - gas_limit: merge.gas_limit, - gas_used: merge.gas_used, - timestamp: merge.timestamp, - extra_data: merge.extra_data, - base_fee_per_gas: merge.base_fee_per_gas, - block_hash: merge.block_hash, - transactions: merge.transactions, - }), - ExecutionPayload::Capella(_) => Err(Error::UnsupportedForkVariant(format!( - "Unsupported conversion to JsonExecutionPayloadV1 for {}", - ForkName::Capella - ))), - ExecutionPayload::Eip4844(_) => Err(Error::UnsupportedForkVariant(format!( - "Unsupported conversion to JsonExecutionPayloadV1 for {}", - ForkName::Eip4844 - ))), +impl From> for JsonExecutionPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + match execution_payload { + ExecutionPayload::Merge(payload) => JsonExecutionPayload::V1(payload.into()), + ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), + ExecutionPayload::Eip4844(payload) => JsonExecutionPayload::V3(payload.into()), } } } -impl TryFrom> for JsonExecutionPayloadV2 { - type Error = Error; - fn try_from(payload: ExecutionPayload) -> Result { - match payload { - ExecutionPayload::Merge(merge) => Ok(JsonExecutionPayloadV2 { - parent_hash: merge.parent_hash, - fee_recipient: merge.fee_recipient, - state_root: merge.state_root, - receipts_root: merge.receipts_root, - logs_bloom: merge.logs_bloom, - prev_randao: merge.prev_randao, - block_number: merge.block_number, - gas_limit: merge.gas_limit, - gas_used: merge.gas_used, - timestamp: merge.timestamp, - extra_data: merge.extra_data, - base_fee_per_gas: merge.base_fee_per_gas, - excess_data_gas: None, - block_hash: merge.block_hash, - transactions: merge.transactions, - withdrawals: None, - }), - ExecutionPayload::Capella(capella) => Ok(JsonExecutionPayloadV2 { - parent_hash: capella.parent_hash, - fee_recipient: capella.fee_recipient, - state_root: capella.state_root, - receipts_root: capella.receipts_root, - logs_bloom: capella.logs_bloom, - prev_randao: capella.prev_randao, - block_number: capella.block_number, - gas_limit: capella.gas_limit, - gas_used: capella.gas_used, - timestamp: capella.timestamp, - extra_data: capella.extra_data, - base_fee_per_gas: capella.base_fee_per_gas, - excess_data_gas: None, - block_hash: capella.block_hash, - transactions: capella.transactions, - withdrawals: Some( - Vec::from(capella.withdrawals) - .into_iter() - .map(Into::into) - .collect::>() - .into(), - ), - }), - ExecutionPayload::Eip4844(eip4844) => Ok(JsonExecutionPayloadV2 { - parent_hash: eip4844.parent_hash, - fee_recipient: eip4844.fee_recipient, - state_root: eip4844.state_root, - receipts_root: eip4844.receipts_root, - logs_bloom: eip4844.logs_bloom, - prev_randao: eip4844.prev_randao, - block_number: eip4844.block_number, - gas_limit: eip4844.gas_limit, - gas_used: eip4844.gas_used, - timestamp: eip4844.timestamp, - extra_data: eip4844.extra_data, - base_fee_per_gas: eip4844.base_fee_per_gas, - excess_data_gas: Some(eip4844.excess_data_gas), - block_hash: eip4844.block_hash, - transactions: eip4844.transactions, - withdrawals: Some( - Vec::from(eip4844.withdrawals) - .into_iter() - .map(Into::into) - .collect::>() - .into(), - ), - }), +impl From> for ExecutionPayloadMerge { + fn from(payload: JsonExecutionPayloadV1) -> Self { + ExecutionPayloadMerge { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + 
receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + } + } +} +impl From> for ExecutionPayloadCapella { + fn from(payload: JsonExecutionPayloadV2) -> Self { + ExecutionPayloadCapella { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), + } + } +} +impl From> for ExecutionPayloadEip4844 { + fn from(payload: JsonExecutionPayloadV3) -> Self { + ExecutionPayloadEip4844 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + excess_data_gas: payload.excess_data_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .cloned() + .map(Into::into) + .collect::>() + .into(), } } } +impl From> for ExecutionPayload { + fn from(json_execution_payload: JsonExecutionPayload) -> Self { + match json_execution_payload { + JsonExecutionPayload::V1(payload) => ExecutionPayload::Merge(payload.into()), + JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), + JsonExecutionPayload::V3(payload) => ExecutionPayload::Eip4844(payload.into()), + } + } +} + +#[superstruct( + variants(V1, V2, V3), + variant_attributes( + derive(Debug, PartialEq, Serialize, Deserialize), + serde(bound = "T: EthSpec", rename_all = "camelCase") + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +#[serde(untagged)] pub struct JsonGetPayloadResponse { + #[superstruct(only(V1), partial_getter(rename = "execution_payload_v1"))] + pub execution_payload: JsonExecutionPayloadV1, + #[superstruct(only(V2), partial_getter(rename = "execution_payload_v2"))] pub execution_payload: JsonExecutionPayloadV2, - // uncomment this when geth fixes its serialization - //#[serde(with = "eth2_serde_utils::u256_hex_be")] - //pub block_value: Uint256, + #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] + pub execution_payload: JsonExecutionPayloadV3, + #[serde(with = "eth2_serde_utils::u256_hex_be")] + pub block_value: Uint256, +} + +impl From> for GetPayloadResponse { + fn from(json_get_payload_response: JsonGetPayloadResponse) -> Self { + match json_get_payload_response 
{ + JsonGetPayloadResponse::V1(response) => { + GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V2(response) => { + GetPayloadResponse::Capella(GetPayloadResponseCapella { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + JsonGetPayloadResponse::V3(response) => { + GetPayloadResponse::Eip4844(GetPayloadResponseEip4844 { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + }) + } + } + } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] @@ -376,9 +375,7 @@ pub struct JsonPayloadAttributes { pub prev_randao: Hash256, pub suggested_fee_recipient: Address, #[superstruct(only(V2))] - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub withdrawals: Option>, + pub withdrawals: Vec, } impl From for JsonPayloadAttributes { @@ -393,9 +390,7 @@ impl From for JsonPayloadAttributes { timestamp: pa.timestamp, prev_randao: pa.prev_randao, suggested_fee_recipient: pa.suggested_fee_recipient, - withdrawals: pa - .withdrawals - .map(|w| w.into_iter().map(Into::into).collect()), + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), }), } } @@ -413,9 +408,7 @@ impl From for PayloadAttributes { timestamp: jpa.timestamp, prev_randao: jpa.prev_randao, suggested_fee_recipient: jpa.suggested_fee_recipient, - withdrawals: jpa - .withdrawals - .map(|jw| jw.into_iter().map(Into::into).collect()), + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), }), } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 7790dcbed..63893375d 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -524,7 +524,7 @@ impl ExecutionBlockGenerator { base_fee_per_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + withdrawals: pa.withdrawals.clone().into(), }) } ForkName::Eip4844 => { @@ -545,7 +545,7 @@ impl ExecutionBlockGenerator { excess_data_gas: Uint256::one(), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), - withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(), + withdrawals: pa.withdrawals.clone().into(), }) } _ => unreachable!(), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f01ae00e8..1e0963649 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -79,9 +79,12 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { JsonExecutionPayload::V1(get_param::>(params, 0)?) } - ENGINE_NEW_PAYLOAD_V2 => { - JsonExecutionPayload::V2(get_param::>(params, 0)?) - } + ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V2(jep)) + .or_else(|_| { + get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V1(jep)) + })?, // TODO(4844) add that here.. 
_ => unreachable!(), }; @@ -93,9 +96,9 @@ pub async fn handle_rpc( // validate method called correctly according to shanghai fork time match fork { ForkName::Merge => { - if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() { + if matches!(request, JsonExecutionPayload::V2(_)) { return Err(format!( - "{} called with `withdrawals` before capella fork!", + "{} called with `ExecutionPayloadV2` before capella fork!", method )); } @@ -104,12 +107,9 @@ pub async fn handle_rpc( if method == ENGINE_NEW_PAYLOAD_V1 { return Err(format!("{} called after capella fork!", method)); } - if request.withdrawals().is_err() - || (request.withdrawals().is_ok() - && request.withdrawals().unwrap().is_none()) - { + if matches!(request, JsonExecutionPayload::V1(_)) { return Err(format!( - "{} called without `withdrawals` after capella fork!", + "{} called with `ExecutionPayloadV1` after capella fork!", method )); } @@ -138,7 +138,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - .new_payload(request.try_into_execution_payload(fork).unwrap()), + .new_payload(request.into()), ) } else { None @@ -171,14 +171,26 @@ pub async fn handle_rpc( // TODO(4844) add 4844 error checking here match method { - ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( - JsonExecutionPayloadV1::try_from(response).unwrap(), - ) - .unwrap()), - ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse { - execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(), - }) - .unwrap()), + ENGINE_GET_PAYLOAD_V1 => { + Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + } + ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V1(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV1 { + execution_payload, + block_value: 0.into(), + }) + .unwrap() + } + JsonExecutionPayload::V2(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV2 { + execution_payload, + block_value: 0.into(), + }) + .unwrap() + } + _ => unreachable!(), + }), _ => unreachable!(), } } @@ -190,8 +202,31 @@ pub async fn handle_rpc( jpa1.map(JsonPayloadAttributes::V1) } ENGINE_FORKCHOICE_UPDATED_V2 => { - let jpa2: Option = get_param(params, 1)?; - jpa2.map(JsonPayloadAttributes::V2) + // we can't use `deny_unknown_fields` without breaking compatibility with some + // clients that haven't updated to the latest engine_api spec. So instead we'll + // need to deserialize based on timestamp + get_param::>(params, 1).and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), + } + }) + .transpose() + })? 
} _ => unreachable!(), }; @@ -204,9 +239,9 @@ pub async fn handle_rpc( .get_fork_at_timestamp(*pa.timestamp()) { ForkName::Merge => { - if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() { + if matches!(pa, JsonPayloadAttributes::V2(_)) { return Err(format!( - "{} called with `withdrawals` before capella fork!", + "{} called with `JsonPayloadAttributesV2` before capella fork!", method )); } @@ -215,11 +250,9 @@ pub async fn handle_rpc( if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(format!("{} called after capella fork!", method)); } - if pa.withdrawals().is_err() - || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none()) - { + if matches!(pa, JsonPayloadAttributes::V1(_)) { return Err(format!( - "{} called without `withdrawals` after capella fork!", + "{} called with `JsonPayloadAttributesV1` after capella fork!", method )); } From 208f531ae7dacff9549eb354eb2e2dd23f0d7490 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 20 Jan 2023 00:46:55 +0000 Subject: [PATCH 08/25] update antithesis dockerfile (#3883) Resolves https://github.com/sigp/lighthouse/issues/3879 Co-authored-by: realbigsean --- testing/antithesis/Dockerfile.libvoidstar | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 32e2d5648..bae180732 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,11 +1,9 @@ -FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +FROM rust:1.66.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse # Build lighthouse directly with a cargo build command, bypassing the Makefile. -# We have to use nightly in order to disable the new LLVM pass manager. -RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse - +RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked From f8a3b3b95acb356104d6f5b990e6bf4e9ff801b0 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 20 Jan 2023 00:46:56 +0000 Subject: [PATCH 09/25] Improve block delay metrics (#3894) We recently ran a large-block experiment on the testnet and plan to do a further experiment on mainnet. Although the metrics recovered from lighthouse nodes were quite useful, I think we could do with greater resolution in the block delay metrics and get some specific values for each block (currently these can be lost to large exponential histogram buckets). 
This PR increases the resolution of the block delay histogram buckets, but also introduces a new metric which records the last block delay. Depending on the polling resolution of the metric server, we can lose some block delay information, however it will always give us a specific value and we will not lose exact data based on poor resolution histogram buckets. --- .../src/beacon_processor/worker/gossip_methods.rs | 4 ++++ beacon_node/network/src/metrics.rs | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index ef23f6761..c142359f3 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -715,6 +715,10 @@ impl Worker { &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME, block_delay, ); + metrics::set_gauge( + &metrics::BEACON_BLOCK_LAST_DELAY, + block_delay.as_millis() as i64, + ); let verification_result = self .chain diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4f3f29f9..baf00720b 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -335,10 +335,18 @@ lazy_static! { pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_gossip_slot_start_delay_time", "Duration between when the block is received and the start of the slot it belongs to.", + // Create a custom bucket list for greater granularity in block delay + Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) + // NOTE: Previous values, which we may want to switch back to. // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] - decimal_buckets(-1,2) + //decimal_buckets(-1,2) ); + pub static ref BEACON_BLOCK_LAST_DELAY: Result = try_create_int_gauge( + "beacon_block_last_delay", + "Keeps track of the last block's delay from the start of the slot" + ); + pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( "beacon_block_gossip_arrived_late_total", "Count of times when a gossip block arrived from the network later than the attestation deadline.", From 4deab888c9c48b16b29cf8bfc4b731524d9e2d33 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 20 Jan 2023 04:19:29 +0000 Subject: [PATCH 10/25] Switch allocator to jemalloc (#3697) ## Proposed Changes Another `tree-states` motivated PR, this adds `jemalloc` as the default allocator, with an option to use the system allocator by compiling with `FEATURES="" make`. 
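Concretely, the build selection works as follows (a sketch based on the Makefile change in this patch; `jemalloc` is only part of the default feature set on non-Windows targets):
```
# Default native build: jemalloc allocator on Linux/macOS
make

# Opt out and build with the system allocator instead
FEATURES="" make
```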
- [x] Metrics - [x] Test on Windows - [x] Test on macOS - [x] Test with `musl` - [x] Metrics dashboard on `lighthouse-metrics` (https://github.com/sigp/lighthouse-metrics/pull/37) Co-authored-by: Michael Sproul --- .cargo/config.toml | 4 +++ .github/workflows/test-suite.yml | 14 +------- Cargo.lock | 41 +++++++++++++++++++++++ Cargo.toml | 1 + Makefile | 14 +++++--- book/src/installation-source.md | 7 +++- bors.toml | 1 - common/malloc_utils/Cargo.toml | 12 +++++-- common/malloc_utils/src/jemalloc.rs | 52 +++++++++++++++++++++++++++++ common/malloc_utils/src/lib.rs | 44 ++++++++++++++++++------ lcli/Cargo.toml | 5 +++ lighthouse/Cargo.toml | 2 ++ lighthouse/src/main.rs | 10 ++++++ 13 files changed, 175 insertions(+), 32 deletions(-) create mode 100644 .cargo/config.toml create mode 100644 common/malloc_utils/src/jemalloc.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..dac016300 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,4 @@ +[env] +# Set the number of arenas to 16 when using jemalloc. +JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" + diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8d52f7fa7..57fee7183 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -306,16 +306,6 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it run: make check-benches - check-consensus: - name: check-consensus - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck consensus code in strict mode - run: make check-consensus clippy: name: clippy runs-on: ubuntu-latest @@ -382,14 +372,12 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY - # NOTE: cargo-udeps version is pinned until this issue is resolved: - # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force --version 0.1.30 + run: cargo install cargo-udeps --locked --force - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/Cargo.lock b/Cargo.lock index f1daf4dbd..56b372401 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2695,6 +2695,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + [[package]] name = "funty" version = "1.1.0" @@ -3595,6 +3601,38 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +[[package]] +name = "jemalloc-ctl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1891c671f3db85d8ea8525dd43ab147f9977041911d24a03e5a36187a7bfde9" +dependencies = [ + "jemalloc-sys", + "libc", + "paste", +] + +[[package]] +name = "jemalloc-sys" +version = "0.5.2+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134163979b6eed9564c98637b710b40979939ba351f59952708234ea11b5f3f8" +dependencies = [ + "cc", + "fs_extra", + "libc", +] + +[[package]] +name = 
"jemallocator" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c2514137880c52b0b4822b563fadd38257c1f380858addb74a400889696ea6" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "js-sys" version = "0.3.60" @@ -3703,6 +3741,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "log", + "malloc_utils", "sensitive_url", "serde", "serde_json", @@ -4510,6 +4549,8 @@ dependencies = [ name = "malloc_utils" version = "0.1.0" dependencies = [ + "jemalloc-ctl", + "jemallocator", "lazy_static", "libc", "lighthouse_metrics", diff --git a/Cargo.toml b/Cargo.toml index e254400e8..de01771eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,7 @@ members = [ "validator_client", "validator_client/slashing_protection", ] +resolver = "2" [patch] [patch.crates-io] diff --git a/Makefile b/Makefile index 33077a6c9..68ada1b4b 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,16 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 +# List of features to use when building natively. Can be overriden via the environment. +# No jemalloc on Windows +ifeq ($(OS),Windows_NT) + FEATURES?= +else + FEATURES?=jemalloc +endif + # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release @@ -101,10 +109,6 @@ cargo-fmt: check-benches: cargo check --workspace --benches -# Typechecks consensus code *without* allowing deprecated legacy arithmetic or metrics. -check-consensus: - cargo check -p state_processing --no-default-features - # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt diff --git a/book/src/installation-source.md b/book/src/installation-source.md index b3d83ef9f..8e515a41b 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -64,6 +64,7 @@ choco install protoc These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. + [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about ## Build Lighthouse @@ -128,8 +129,12 @@ Commonly used features include: * `gnosis`: support for the Gnosis Beacon Chain. * `portable`: support for legacy hardware. * `modern`: support for exclusively modern hardware. -* `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). +* `slasher-mdbx`: support for the MDBX slasher backend. Enabled by default. * `slasher-lmdb`: support for the LMDB slasher backend. +* `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. + Not supported on Windows. 
+ +[jemalloc]: https://jemalloc.net/ ## Compilation Profiles diff --git a/bors.toml b/bors.toml index 096ac3b29..9e633d63f 100644 --- a/bors.toml +++ b/bors.toml @@ -10,7 +10,6 @@ status = [ "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", - "check-consensus", "clippy", "arbitrary-check", "cargo-audit", diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 569eed608..c88ec0bd5 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -4,13 +4,21 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" libc = "0.2.79" parking_lot = "0.12.0" +jemalloc-ctl = { version = "0.5.0", optional = true } + +# Jemalloc's background_threads feature requires Linux (pthreads). +[target.'cfg(target_os = "linux")'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats", "background_threads"] } + +[target.'cfg(not(target_os = "linux"))'.dependencies] +jemallocator = { version = "0.5.0", optional = true, features = ["stats"] } [features] mallinfo2 = [] +jemalloc = ["jemallocator", "jemalloc-ctl"] +jemalloc-profiling = ["jemallocator/profiling"] diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs new file mode 100644 index 000000000..c796ea39a --- /dev/null +++ b/common/malloc_utils/src/jemalloc.rs @@ -0,0 +1,52 @@ +//! Set the allocator to `jemalloc`. +//! +//! Due to `jemalloc` requiring configuration at compile time or immediately upon runtime +//! initialisation it is configured via a Cargo config file in `.cargo/config.toml`. +//! +//! The `jemalloc` tuning can be overriden by: +//! +//! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. +//! B) `_RJEM_MALLOC_CONF` at runtime. +use jemalloc_ctl::{arenas, epoch, stats, Error}; +use lazy_static::lazy_static; +use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; + +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +// Metrics for jemalloc. +lazy_static! { + pub static ref NUM_ARENAS: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use"); + pub static ref BYTES_ALLOCATED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated"); + pub static ref BYTES_ACTIVE: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active"); + pub static ref BYTES_MAPPED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped"); + pub static ref BYTES_METADATA: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata"); + pub static ref BYTES_RESIDENT: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident"); + pub static ref BYTES_RETAINED: lighthouse_metrics::Result = + try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained"); +} + +pub fn scrape_jemalloc_metrics() { + scrape_jemalloc_metrics_fallible().unwrap() +} + +pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { + // Advance the epoch so that the underlying statistics are updated. + epoch::advance()?; + + set_gauge(&NUM_ARENAS, arenas::narenas::read()? 
as i64); + set_gauge(&BYTES_ALLOCATED, stats::allocated::read()? as i64); + set_gauge(&BYTES_ACTIVE, stats::active::read()? as i64); + set_gauge(&BYTES_MAPPED, stats::mapped::read()? as i64); + set_gauge(&BYTES_METADATA, stats::metadata::read()? as i64); + set_gauge(&BYTES_RESIDENT, stats::resident::read()? as i64); + set_gauge(&BYTES_RETAINED, stats::retained::read()? as i64); + + Ok(()) +} diff --git a/common/malloc_utils/src/lib.rs b/common/malloc_utils/src/lib.rs index b8aed948f..3bb242369 100644 --- a/common/malloc_utils/src/lib.rs +++ b/common/malloc_utils/src/lib.rs @@ -2,18 +2,18 @@ //! //! ## Conditional Compilation //! -//! Presently, only configuration for "The GNU Allocator" from `glibc` is supported. All other -//! allocators are ignored. +//! This crate can be compiled with different feature flags to support different allocators: //! -//! It is assumed that if the following two statements are correct then we should expect to -//! configure `glibc`: +//! - Jemalloc, via the `jemalloc` feature. +//! - GNU malloc, if no features are set and the system supports it. +//! - The system allocator, if no features are set and the allocator is not GNU malloc. +//! +//! It is assumed that if Jemalloc is not in use, and the following two statements are correct then +//! we should expect to configure `glibc`: //! //! - `target_os = linux` //! - `target_env != musl` //! -//! In all other cases this library will not attempt to do anything (i.e., all functions are -//! no-ops). -//! //! If the above conditions are fulfilled but `glibc` still isn't present at runtime then a panic //! may be triggered. It is understood that there's no way to be certain that a compatible `glibc` //! is present: https://github.com/rust-lang/rust/issues/33244. @@ -24,18 +24,42 @@ //! detecting `glibc` are best-effort. If this crate throws errors about undefined external //! functions, then try to compile with the `not_glibc_interface` module. 
-#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod glibc; +#[cfg(feature = "jemalloc")] +mod jemalloc; + pub use interface::*; -#[cfg(all(target_os = "linux", not(target_env = "musl")))] +#[cfg(all( + target_os = "linux", + not(target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { pub use crate::glibc::configure_glibc_malloc as configure_memory_allocator; pub use crate::glibc::scrape_mallinfo_metrics as scrape_allocator_metrics; } -#[cfg(any(not(target_os = "linux"), target_env = "musl"))] +#[cfg(feature = "jemalloc")] +mod interface { + #[allow(dead_code)] + pub fn configure_memory_allocator() -> Result<(), String> { + Ok(()) + } + + pub use crate::jemalloc::scrape_jemalloc_metrics as scrape_allocator_metrics; +} + +#[cfg(all( + any(not(target_os = "linux"), target_env = "musl"), + not(feature = "jemalloc") +))] mod interface { #[allow(dead_code, clippy::unnecessary_wraps)] pub fn configure_memory_allocator() -> Result<(), String> { diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 93e529755..8ebac0ca6 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [features] portable = ["bls/supranational-portable"] fake_crypto = ['bls/fake_crypto'] +jemalloc = ["malloc_utils/jemalloc"] [dependencies] bls = { path = "../crypto/bls" } @@ -40,3 +41,7 @@ eth2 = { path = "../common/eth2" } snap = "1.0.1" beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } +malloc_utils = { path = "../common/malloc_utils" } + +[package.metadata.cargo-udeps.ignore] +normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f9597ade8..e88aa2485 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -24,6 +24,8 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. slasher-lmdb = ["slasher/lmdb"] +# Use jemalloc. +jemalloc = ["malloc_utils/jemalloc"] [dependencies] beacon_node = { "path" = "../beacon_node" } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 64ee0432f..babe2f8dc 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -31,6 +31,14 @@ fn bls_library_name() -> &'static str { } } +fn allocator_name() -> &'static str { + if cfg!(feature = "jemalloc") { + "jemalloc" + } else { + "system" + } +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
if std::env::var("RUST_BACKTRACE").is_err() { @@ -51,10 +59,12 @@ fn main() { "{}\n\ BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), + allocator_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() From 3e67fa303805a9bb25f0c2b6b5e62dada0e28065 Mon Sep 17 00:00:00 2001 From: antondlr Date: Fri, 20 Jan 2023 20:26:32 +0000 Subject: [PATCH 11/25] fix multiarch docker builds (#3904) ## Issue Addressed #3902 Tested and confirmed working [here](https://github.com/antondlr/lighthouse/actions/runs/3970418322) ## Additional Info buildx v0.10.0 added provenance attestations to images but they are packed in a way that's incompatible with `docker manifest` https://github.com/docker/buildx/releases --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 13b841169..76e5d031a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -99,6 +99,7 @@ jobs: --platform=linux/${SHORT_ARCH} \ --file ./Dockerfile.cross . \ --tag ${IMAGE_NAME}:${VERSION}-${SHORT_ARCH}${VERSION_SUFFIX}${MODERNITY_SUFFIX} \ + --provenance=false \ --push build-docker-multiarch: name: build-docker-multiarch${{ matrix.modernity }} From d8abf2fc41506cb52e629b7ab9c513b854b8dfeb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 21 Jan 2023 10:39:59 +1100 Subject: [PATCH 12/25] Import BLS to execution changes before Capella (#3892) * Import BLS to execution changes before Capella * Test for BLS to execution change HTTP API * Pack BLS to execution changes in LIFO order * Remove unused var * Clippy --- beacon_node/beacon_chain/src/beacon_chain.rs | 60 +++++- beacon_node/beacon_chain/src/errors.rs | 3 +- beacon_node/beacon_chain/src/test_utils.rs | 58 ++++++ beacon_node/http_api/src/lib.rs | 46 +++-- beacon_node/http_api/tests/fork_tests.rs | 180 +++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 2 +- .../src/bls_to_execution_changes.rs | 105 ++++++++++ beacon_node/operation_pool/src/lib.rs | 61 +++--- beacon_node/operation_pool/src/persistence.rs | 24 +-- common/eth2/src/lib.rs | 18 ++ .../state_processing/src/verify_operation.rs | 31 +-- .../types/src/bls_to_execution_change.rs | 20 ++ 12 files changed, 517 insertions(+), 91 deletions(-) create mode 100644 beacon_node/operation_pool/src/bls_to_execution_changes.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 798a9b808..77de5eb14 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2227,32 +2227,74 @@ impl BeaconChain { } /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. - pub fn verify_bls_to_execution_change_for_gossip( + pub fn verify_bls_to_execution_change_for_http_api( &self, bls_to_execution_change: SignedBlsToExecutionChange, ) -> Result, Error> { - let current_fork = self.spec.fork_name_at_slot::(self.slot()?); - if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { - // Disallow BLS to execution changes prior to the Capella fork. - return Err(Error::BlsToExecutionChangeBadFork(current_fork)); + // Before checking the gossip duplicate filter, check that no prior change is already + // in our op pool. Ignore these messages: do not gossip, do not try to override the pool. 
+ match self + .op_pool + .bls_to_execution_change_in_pool_equals(&bls_to_execution_change) + { + Some(true) => return Ok(ObservationOutcome::AlreadyKnown), + Some(false) => return Err(Error::BlsToExecutionConflictsWithPool), + None => (), } - let wall_clock_state = self.wall_clock_state()?; + // Use the head state to save advancing to the wall-clock slot unnecessarily. The message is + // signed with respect to the genesis fork version, and the slot check for gossip is applied + // separately. This `Arc` clone of the head is nice and cheap. + let head_snapshot = self.head().snapshot; + let head_state = &head_snapshot.beacon_state; Ok(self .observed_bls_to_execution_changes .lock() - .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?) + .verify_and_observe(bls_to_execution_change, head_state, &self.spec)?) + } + + /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. + pub fn verify_bls_to_execution_change_for_gossip( + &self, + bls_to_execution_change: SignedBlsToExecutionChange, + ) -> Result, Error> { + // Ignore BLS to execution changes on gossip prior to Capella. + if !self.current_slot_is_post_capella()? { + return Err(Error::BlsToExecutionPriorToCapella); + } + self.verify_bls_to_execution_change_for_http_api(bls_to_execution_change) + .or_else(|e| { + // On gossip treat conflicts the same as duplicates [IGNORE]. + match e { + Error::BlsToExecutionConflictsWithPool => Ok(ObservationOutcome::AlreadyKnown), + e => Err(e), + } + }) + } + + /// Check if the current slot is greater than or equal to the Capella fork epoch. + pub fn current_slot_is_post_capella(&self) -> Result { + let current_fork = self.spec.fork_name_at_slot::(self.slot()?); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork { + Ok(false) + } else { + Ok(true) + } } /// Import a BLS to execution change to the op pool. + /// + /// Return `true` if the change was added to the pool. pub fn import_bls_to_execution_change( &self, bls_to_execution_change: SigVerifiedOp, - ) { + ) -> bool { if self.eth1_chain.is_some() { self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change); + .insert_bls_to_execution_change(bls_to_execution_change) + } else { + false } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 47ebc23ba..e4f675e70 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -206,7 +206,8 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), - BlsToExecutionChangeBadFork(ForkName), + BlsToExecutionPriorToCapella, + BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0373e9fc0..e71c1a987 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -148,6 +148,7 @@ pub struct Builder { eth_spec_instance: T::EthSpec, spec: Option, validator_keypairs: Option>, + withdrawal_keypairs: Vec>, chain_config: Option, store_config: Option, #[allow(clippy::type_complexity)] @@ -170,6 +171,17 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); + // For the interop genesis state we know that the withdrawal credentials are set equal + // to the validator keypairs. 
Check for any manually initialised credentials. + assert!( + self.withdrawal_keypairs.is_empty(), + "withdrawal credentials are ignored by fresh_ephemeral_store" + ); + self.withdrawal_keypairs = validator_keypairs + .iter() + .map(|kp| Some(kp.clone())) + .collect(); + let store = Arc::new( HotColdDB::open_ephemeral( self.store_config.clone().unwrap_or_default(), @@ -282,6 +294,7 @@ where eth_spec_instance, spec: None, validator_keypairs: None, + withdrawal_keypairs: vec![], chain_config: None, store_config: None, store: None, @@ -539,6 +552,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, + withdrawal_keypairs: self.withdrawal_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, @@ -554,6 +568,12 @@ where /// Used for testing. pub struct BeaconChainHarness { pub validator_keypairs: Vec, + /// Optional BLS withdrawal keys for each validator. + /// + /// If a validator index is missing from this vec or their entry is `None` then either + /// no BLS withdrawal key was set for them (they had an address from genesis) or the test + /// initializer neglected to set this field. + pub withdrawal_keypairs: Vec>, pub chain: Arc>, pub spec: ChainSpec, @@ -1465,6 +1485,44 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn make_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> SignedBlsToExecutionChange { + let keypair = self.get_withdrawal_keypair(validator_index); + self.make_bls_to_execution_change_with_keys( + validator_index, + address, + &keypair.pk, + &keypair.sk, + ) + } + + pub fn make_bls_to_execution_change_with_keys( + &self, + validator_index: u64, + address: Address, + pubkey: &PublicKey, + secret_key: &SecretKey, + ) -> SignedBlsToExecutionChange { + let genesis_validators_root = self.chain.genesis_validators_root; + BlsToExecutionChange { + validator_index, + from_bls_pubkey: pubkey.compress(), + to_execution_address: address, + } + .sign(secret_key, genesis_validators_root, &self.chain.spec) + } + + pub fn get_withdrawal_keypair(&self, validator_index: u64) -> &Keypair { + self.withdrawal_keypairs + .get(validator_index as usize) + .expect("BLS withdrawal key missing from harness") + .as_ref() + .expect("no withdrawal key for validator") + } + pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0d36601cf..3f3025573 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1677,7 +1677,7 @@ pub fn serve( .and_then( |chain: Arc>, address_changes: Vec, - #[allow(unused)] network_tx: UnboundedSender>, + network_tx: UnboundedSender>, log: Logger| { blocking_json_task(move || { let mut failures = vec![]; @@ -1685,15 +1685,38 @@ pub fn serve( for (index, address_change) in address_changes.into_iter().enumerate() { let validator_index = address_change.message.validator_index; - match chain.verify_bls_to_execution_change_for_gossip(address_change) { + match chain.verify_bls_to_execution_change_for_http_api(address_change) { Ok(ObservationOutcome::New(verified_address_change)) => { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - chain.import_bls_to_execution_change(verified_address_change); + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = 
verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let publish = chain.current_slot_is_post_capella().unwrap_or(false); + if publish { + publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). + let imported = + chain.import_bls_to_execution_change(verified_address_change); + + info!( + log, + "Processed BLS to execution change"; + "validator_index" => validator_index, + "address" => ?address, + "published" => publish, + "imported" => imported, + ); } Ok(ObservationOutcome::AlreadyKnown) => { debug!( @@ -1703,11 +1726,12 @@ pub fn serve( ); } Err(e) => { - error!( + warn!( log, "Invalid BLS to execution change"; "validator_index" => validator_index, - "source" => "HTTP API", + "reason" => ?e, + "source" => "HTTP", ); failures.push(api_types::Failure::new( index, diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 942a1167c..eaaa4e864 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,8 @@ //! Tests for API behaviour across fork boundaries. use crate::common::*; use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; -use eth2::types::{StateId, SyncSubcommittee}; -use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; +use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; type E = MinimalEthSpec; @@ -12,6 +12,14 @@ fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec { spec } +fn capella_spec(capella_fork_epoch: Epoch) -> ChainSpec { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(capella_fork_epoch); + spec +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sync_committee_duties_across_fork() { let validator_count = E::sync_committee_size(); @@ -307,3 +315,171 @@ async fn sync_committee_indices_across_fork() { ); } } + +/// Assert that an HTTP API error has the given status code and indexed errors for the given indices. +fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Vec) { + let eth2::Error::ServerIndexedMessage(IndexedErrorMessage { + code, + failures, + .. + }) = error else { + panic!("wrong error, expected ServerIndexedMessage, got: {error:?}") + }; + assert_eq!(code, status_code); + assert_eq!(failures.len(), indices.len()); + for (index, failure) in indices.into_iter().zip(failures) { + assert_eq!(failure.index, index as u64); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn bls_to_execution_changes_update_all_around_capella_fork() { + let validator_count = 128; + let fork_epoch = Epoch::new(2); + let spec = capella_spec(fork_epoch); + let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::>(); + + // Create a bunch of valid address changes. 
+ let valid_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index), + ) + }) + .collect::>(); + + // Address changes which conflict with `valid_address_changes` on the address chosen. + let conflicting_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + harness.make_bls_to_execution_change( + validator_index, + Address::from_low_u64_be(validator_index + 1), + ) + }) + .collect::>(); + + // Address changes signed with the wrong key. + let wrong_key_address_changes = all_validators_u64 + .iter() + .map(|&validator_index| { + // Use the correct pubkey. + let pubkey = &harness.get_withdrawal_keypair(validator_index).pk; + // And the wrong secret key. + let secret_key = &harness + .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .sk; + harness.make_bls_to_execution_change_with_keys( + validator_index, + Address::from_low_u64_be(validator_index), + pubkey, + secret_key, + ) + }) + .collect::>(); + + // Submit some changes before Capella. Just enough to fill two blocks. + let num_pre_capella = validator_count / 4; + let blocks_filled_pre_capella = 2; + assert_eq!( + num_pre_capella, + blocks_filled_pre_capella * max_bls_to_execution_changes + ); + + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Conflicting changes for the same validators should all fail. + let error = client + .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, (0..num_pre_capella).collect()); + + // Re-submitting the same changes should be accepted. + client + .post_beacon_pool_bls_to_execution_changes(&valid_address_changes[..num_pre_capella]) + .await + .unwrap(); + + // Invalid changes signed with the wrong keys should all be rejected without affecting the seen + // indices filters (apply ALL of them). + let error = client + .post_beacon_pool_bls_to_execution_changes(&wrong_key_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error(error, 400, all_validators.clone()); + + // Advance to right before Capella. + let capella_slot = fork_epoch.start_slot(E::slots_per_epoch()); + harness.extend_to_slot(capella_slot - 1).await; + assert_eq!(harness.head_slot(), capella_slot - 1); + + // Add Capella blocks which should be full of BLS to execution changes. + for i in 0..validator_count / max_bls_to_execution_changes { + let head_block_root = harness.extend_slots(1).await; + let head_block = harness + .chain + .get_block(&head_block_root) + .await + .unwrap() + .unwrap(); + + let bls_to_execution_changes = head_block + .message() + .body() + .bls_to_execution_changes() + .unwrap(); + + // Block should be full. + assert_eq!( + bls_to_execution_changes.len(), + max_bls_to_execution_changes, + "block not full on iteration {i}" + ); + + // Included changes should be the ones from `valid_address_changes` in any order. + for address_change in bls_to_execution_changes.iter() { + assert!(valid_address_changes.contains(address_change)); + } + + // After the initial 2 blocks, add the rest of the changes using a large + // request containing all the valid, all the conflicting and all the invalid. + // Despite the invalid and duplicate messages, the new ones should still get picked up by + // the pool. 
+ if i == blocks_filled_pre_capella - 1 { + let all_address_changes: Vec<_> = [ + valid_address_changes.clone(), + conflicting_address_changes.clone(), + wrong_key_address_changes.clone(), + ] + .concat(); + + let error = client + .post_beacon_pool_bls_to_execution_changes(&all_address_changes) + .await + .unwrap_err(); + assert_server_indexed_error( + error, + 400, + (validator_count..3 * validator_count).collect(), + ); + } + } + + // Eventually all validators should have eth1 withdrawal credentials. + let head_state = harness.get_current_state(); + for validator in head_state.validators() { + assert!(validator.has_eth1_withdrawal_credential(&spec)); + } +} diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 00141312b..700bed866 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1220,7 +1220,7 @@ impl Worker { "error" => ?e ); // We ignore pre-capella messages without penalizing peers. - if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) { + if matches!(e, BeaconChainError::BlsToExecutionPriorToCapella) { self.propagate_validation_result( message_id, peer_id, diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs new file mode 100644 index 000000000..84513d466 --- /dev/null +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -0,0 +1,105 @@ +use state_processing::SigVerifiedOp; +use std::collections::{hash_map::Entry, HashMap}; +use std::sync::Arc; +use types::{ + AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, + SignedBlsToExecutionChange, +}; + +/// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator. +/// +/// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, +/// and is less-relevant after that. +#[derive(Debug, Default)] +pub struct BlsToExecutionChanges { + /// Map from validator index to BLS to execution change. + by_validator_index: HashMap>>, + /// Last-in-first-out (LIFO) queue of verified messages. + queue: Vec>>, +} + +impl BlsToExecutionChanges { + pub fn existing_change_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.by_validator_index + .get(&address_change.message.validator_index) + .map(|existing| existing.as_inner() == address_change) + } + + pub fn insert( + &mut self, + verified_change: SigVerifiedOp, + ) -> bool { + // Wrap in an `Arc` once on insert. + let verified_change = Arc::new(verified_change); + match self + .by_validator_index + .entry(verified_change.as_inner().message.validator_index) + { + Entry::Vacant(entry) => { + self.queue.push(verified_change.clone()); + entry.insert(verified_change); + true + } + Entry::Occupied(_) => false, + } + } + + /// FIFO ordering, used for persistence to disk. + pub fn iter_fifo( + &self, + ) -> impl Iterator>> { + self.queue.iter() + } + + /// LIFO ordering, used for block packing. + pub fn iter_lifo( + &self, + ) -> impl Iterator>> { + self.queue.iter().rev() + } + + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. + /// + /// The block check is necessary to avoid pruning too eagerly and losing the ability to include + /// address changes during re-orgs. 
This is isn't *perfect* so some address changes could + /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished + /// due to the gossip duplicate rules. + pub fn prune>( + &mut self, + head_block: &SignedBeaconBlock, + head_state: &BeaconState, + spec: &ChainSpec, + ) { + let mut validator_indices_pruned = vec![]; + + self.queue.retain(|address_change| { + let validator_index = address_change.as_inner().message.validator_index; + head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| { + let prune = validator.has_eth1_withdrawal_credential(spec) + && head_block + .message() + .body() + .bls_to_execution_changes() + .map_or(true, |recent_changes| { + !recent_changes + .iter() + .any(|c| c.message.validator_index == validator_index) + }); + if prune { + validator_indices_pruned.push(validator_index); + } + !prune + }) + }); + + for validator_index in validator_indices_pruned { + self.by_validator_index.remove(&validator_index); + } + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 70e0d56bc..4643addad 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -2,6 +2,7 @@ mod attestation; mod attestation_id; mod attestation_storage; mod attester_slashing; +mod bls_to_execution_changes; mod max_cover; mod metrics; mod persistence; @@ -18,6 +19,7 @@ pub use persistence::{ pub use reward_cache::RewardCache; use crate::attestation_storage::{AttestationMap, CheckpointKey}; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; @@ -51,8 +53,8 @@ pub struct OperationPool { proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>>, - /// Map from credential changing validator to their execution change data. - bls_to_execution_changes: RwLock>>, + /// Map from credential changing validator to their position in the queue. + bls_to_execution_changes: RwLock>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -513,15 +515,28 @@ impl OperationPool { ); } - /// Insert a BLS to execution change into the pool. + /// Check if an address change equal to `address_change` is already in the pool. + /// + /// Return `None` if no address change for the validator index exists in the pool. + pub fn bls_to_execution_change_in_pool_equals( + &self, + address_change: &SignedBlsToExecutionChange, + ) -> Option { + self.bls_to_execution_changes + .read() + .existing_change_equals(address_change) + } + + /// Insert a BLS to execution change into the pool, *only if* no prior change is known. + /// + /// Return `true` if the change was inserted. pub fn insert_bls_to_execution_change( &self, verified_change: SigVerifiedOp, - ) { - self.bls_to_execution_changes.write().insert( - verified_change.as_inner().message.validator_index, - verified_change, - ); + ) -> bool { + self.bls_to_execution_changes + .write() + .insert(verified_change) } /// Get a list of execution changes for inclusion in a block. 
@@ -533,7 +548,7 @@ impl OperationPool { spec: &ChainSpec, ) -> Vec { filter_limit_operations( - self.bls_to_execution_changes.read().values(), + self.bls_to_execution_changes.read().iter_lifo(), |address_change| { address_change.signature_is_still_valid(&state.fork()) && state @@ -548,33 +563,15 @@ impl OperationPool { } /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. - /// - /// The block check is necessary to avoid pruning too eagerly and losing the ability to include - /// address changes during re-orgs. This is isn't *perfect* so some address changes could - /// still get stuck if there are gnarly re-orgs and the changes can't be widely republished - /// due to the gossip duplicate rules. pub fn prune_bls_to_execution_changes>( &self, head_block: &SignedBeaconBlock, head_state: &BeaconState, spec: &ChainSpec, ) { - prune_validator_hash_map( - &mut self.bls_to_execution_changes.write(), - |validator_index, validator| { - validator.has_eth1_withdrawal_credential(spec) - && head_block - .message() - .body() - .bls_to_execution_changes() - .map_or(true, |recent_changes| { - !recent_changes - .iter() - .any(|c| c.message.validator_index == validator_index) - }) - }, - head_state, - ); + self.bls_to_execution_changes + .write() + .prune(head_block, head_state, spec) } /// Prune all types of transactions given the latest head state and head fork. @@ -663,8 +660,8 @@ impl OperationPool { pub fn get_all_bls_to_execution_changes(&self) -> Vec { self.bls_to_execution_changes .read() - .iter() - .map(|(_, address_change)| address_change.as_inner().clone()) + .iter_fifo() + .map(|address_change| address_change.as_inner().clone()) .collect() } } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 043e6fb7f..4948040ae 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,5 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; +use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -105,8 +106,8 @@ impl PersistedOperationPool { let bls_to_execution_changes = operation_pool .bls_to_execution_changes .read() - .iter() - .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone()) + .iter_fifo() + .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) .collect(); PersistedOperationPool::V14(PersistedOperationPoolV14 { @@ -153,18 +154,13 @@ impl PersistedOperationPool { PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { return Err(OpPoolError::IncorrectOpPoolVariant) } - PersistedOperationPool::V14(pool) => RwLock::new( - pool.bls_to_execution_changes - .iter() - .cloned() - .map(|bls_to_execution_change| { - ( - bls_to_execution_change.as_inner().message.validator_index, - bls_to_execution_change, - ) - }) - .collect(), - ), + PersistedOperationPool::V14(pool) => { + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + for bls_to_execution_change in pool.bls_to_execution_changes { + bls_to_execution_changes.insert(bls_to_execution_change); + } + RwLock::new(bls_to_execution_changes) + } }; let op_pool = OperationPool { attestations, diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 752e472e2..1dc83d19b 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1012,6 +1012,24 @@ impl BeaconNodeHttpClient { 
Ok(()) } + /// `POST beacon/pool/bls_to_execution_changes` + pub async fn post_beacon_pool_bls_to_execution_changes( + &self, + address_changes: &[SignedBlsToExecutionChange], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("bls_to_execution_changes"); + + self.post(path, &address_changes).await?; + + Ok(()) + } + /// `GET beacon/deposit_snapshot` pub async fn get_deposit_snapshot(&self) -> Result, Error> { use ssz::Decode; diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index efd356462..50ac2ff3d 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -67,7 +67,7 @@ where fn new(op: T, state: &BeaconState) -> Self { let verified_against = VerifiedAgainst { fork_versions: op - .verification_epochs(state.current_epoch()) + .verification_epochs() .into_iter() .map(|epoch| state.fork().get_fork_version(epoch)) .collect(), @@ -89,13 +89,9 @@ where } pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { - // Pass the fork's epoch as the effective current epoch. If the message is a current-epoch - // style message like `SignedBlsToExecutionChange` then `get_fork_version` will return the - // current fork version and we'll check it matches the fork version the message was checked - // against. - let effective_current_epoch = current_fork.epoch; + // The .all() will return true if the iterator is empty. self.as_inner() - .verification_epochs(effective_current_epoch) + .verification_epochs() .into_iter() .zip(self.verified_against.fork_versions.iter()) .all(|(epoch, verified_fork_version)| { @@ -126,12 +122,8 @@ pub trait VerifyOperation: Encode + Decode + Sized { /// /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. /// - /// If the message contains no inherent epoch it should return the `current_epoch` that is - /// passed in, as that's the epoch at which it was verified. - fn verification_epochs( - &self, - current_epoch: Epoch, - ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; + /// If the message is valid across all forks it should return an empty smallvec. + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } impl VerifyOperation for SignedVoluntaryExit { @@ -147,7 +139,7 @@ impl VerifyOperation for SignedVoluntaryExit { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![self.message.epoch] } } @@ -165,7 +157,7 @@ impl VerifyOperation for AttesterSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ self.attestation_1.data.target.epoch, self.attestation_2.data.target.epoch @@ -186,7 +178,7 @@ impl VerifyOperation for ProposerSlashing { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs(&self, _: Epoch) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { // Only need a single epoch because the slots of the two headers must be equal. 
smallvec![self .signed_header_1 @@ -209,10 +201,7 @@ impl VerifyOperation for SignedBlsToExecutionChange { } #[allow(clippy::integer_arithmetic)] - fn verification_epochs( - &self, - current_epoch: Epoch, - ) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { - smallvec![current_epoch] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![] } } diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index f6064f65a..cb73e43f9 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -28,6 +28,26 @@ pub struct BlsToExecutionChange { impl SignedRoot for BlsToExecutionChange {} +impl BlsToExecutionChange { + pub fn sign( + self, + secret_key: &SecretKey, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> SignedBlsToExecutionChange { + let domain = spec.compute_domain( + Domain::BlsToExecutionChange, + spec.genesis_fork_version, + genesis_validators_root, + ); + let message = self.signing_root(domain); + SignedBlsToExecutionChange { + message: self, + signature: secret_key.sign(message), + } + } +} + #[cfg(test)] mod tests { use super::*; From 2802bc9a9c052654a1dac3e79a1f6b7cc2cdcf6c Mon Sep 17 00:00:00 2001 From: naviechan Date: Tue, 24 Jan 2023 02:06:42 +0000 Subject: [PATCH 13/25] Implement sync_committee_rewards API (per-validator reward) (#3903) ## Issue Addressed [#3661](https://github.com/sigp/lighthouse/issues/3661) ## Proposed Changes `/eth/v1/beacon/rewards/sync_committee/{block_id}` ``` { "execution_optimistic": false, "finalized": false, "data": [ { "validator_index": "0", "reward": "2000" } ] } ``` The issue contains the implementation of three per-validator reward APIs: * `sync_committee_rewards` * [`attestation_rewards`](https://github.com/sigp/lighthouse/pull/3822) * `block_rewards` This PR only implements the `sync_committe_rewards `. The endpoints can be viewed in the Ethereum Beacon nodes API browser: [https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards) ## Additional Info The implementation of [consensus client reward APIs](https://github.com/eth-protocol-fellows/cohort-three/blob/master/projects/project-ideas.md#consensus-client-reward-apis) is part of the [EPF](https://github.com/eth-protocol-fellows/cohort-three). 
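For reference, a minimal request against a locally running node might look like the sketch below. This is only an illustration: it assumes the default HTTP API port (5052), and the block ID `head` and the validator indices are placeholder values. Per the handler added in this patch, posting an empty list (`[]`) returns the reward for every sync committee member of that block. ``` curl -X POST "http://localhost:5052/eth/v1/beacon/rewards/sync_committee/head" \ -H "Content-Type: application/json" \ -d '["0", "1"]' ``` The response has the shape shown above, wrapped with the usual `execution_optimistic` flag.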
Co-authored-by: navie Co-authored-by: kevinbogner --- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/lib.rs | 1 + .../src/sync_committee_rewards.rs | 87 +++++++++++++ beacon_node/beacon_chain/src/test_utils.rs | 25 ++++ beacon_node/beacon_chain/tests/main.rs | 1 + beacon_node/beacon_chain/tests/rewards.rs | 121 ++++++++++++++++++ beacon_node/http_api/src/lib.rs | 37 ++++++ .../http_api/src/sync_committee_rewards.rs | 77 +++++++++++ common/eth2/src/lib.rs | 18 +++ common/eth2/src/lighthouse.rs | 2 + .../src/lighthouse/sync_committee_rewards.rs | 12 ++ 11 files changed, 382 insertions(+) create mode 100644 beacon_node/beacon_chain/src/sync_committee_rewards.rs create mode 100644 beacon_node/beacon_chain/tests/rewards.rs create mode 100644 beacon_node/http_api/src/sync_committee_rewards.rs create mode 100644 common/eth2/src/lighthouse/sync_committee_rewards.rs diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 17f58b223..24ea07833 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -160,6 +160,7 @@ pub enum BeaconChainError { BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, + SyncCommitteeRewardsSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), HeadBlockMissingFromForkChoice(Hash256), diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ae1c5e4b7..ae3e98f91 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -40,6 +40,7 @@ pub mod schema_change; mod shuffling_cache; mod snapshot_cache; pub mod state_advance_timer; +pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs new file mode 100644 index 000000000..561fed1a8 --- /dev/null +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -0,0 +1,87 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; + +use eth2::lighthouse::SyncCommitteeReward; +use safe_arith::SafeArith; +use slog::error; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use std::collections::HashMap; +use store::RelativeEpoch; +use types::{BeaconBlockRef, BeaconState, ExecPayload}; + +impl BeaconChain { + pub fn compute_sync_committee_rewards>( + &self, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, + state: &mut BeaconState, + ) -> Result, BeaconChainError> { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let spec = &self.spec; + + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + let sync_aggregate = block.body().sync_aggregate()?; + + let sync_committee = state.current_sync_committee()?.clone(); + + let sync_committee_indices = state.get_sync_committee_indices(&sync_committee)?; + + let (participant_reward_value, proposer_reward_per_bit) = + compute_sync_aggregate_rewards(state, spec).map_err(|e| { + error!( + self.log, "Error calculating sync aggregate rewards"; + "error" => ?e + ); + BeaconChainError::SyncCommitteeRewardsSyncError + })?; + + let mut balances = HashMap::::new(); + + let mut total_proposer_rewards = 0; + let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + + // Apply rewards to participant balances. 
Keep track of proposer rewards + for (validator_index, participant_bit) in sync_committee_indices + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + { + let participant_balance = balances + .entry(*validator_index) + .or_insert_with(|| state.balances()[*validator_index]); + + if participant_bit { + participant_balance.safe_add_assign(participant_reward_value)?; + + balances + .entry(proposer_index) + .or_insert_with(|| state.balances()[proposer_index]) + .safe_add_assign(proposer_reward_per_bit)?; + + total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?; + } else { + *participant_balance = participant_balance.saturating_sub(participant_reward_value); + } + } + + Ok(balances + .iter() + .filter_map(|(i, new_balance)| { + let reward = if *i != proposer_index { + *new_balance as i64 - state.balances()[*i] as i64 + } else if sync_committee_indices.contains(i) { + *new_balance as i64 + - state.balances()[*i] as i64 + - total_proposer_rewards as i64 + } else { + return None; + }; + Some(SyncCommitteeReward { + validator_index: *i as u64, + reward, + }) + }) + .collect()) + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 66de3f02d..749487dc5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + sync_committee_verification::Error as SyncCommitteeError, validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; @@ -1980,6 +1981,30 @@ where (honest_head, faulty_head) } + + pub fn process_sync_contributions( + &self, + sync_contributions: HarnessSyncContributions, + ) -> Result<(), SyncCommitteeError> { + let mut verified_contributions = Vec::with_capacity(sync_contributions.len()); + + for (_, contribution_and_proof) in sync_contributions { + let signed_contribution_and_proof = contribution_and_proof.unwrap(); + + let verified_contribution = self + .chain + .verify_sync_contribution_for_gossip(signed_contribution_and_proof)?; + + verified_contributions.push(verified_contribution); + } + + for verified_contribution in verified_contributions { + self.chain + .add_contribution_to_block_inclusion_pool(verified_contribution)?; + } + + Ok(()) + } } // Junk `Debug` impl to satistfy certain trait bounds during testing. 
diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 1c61e9927..eceb4f2e8 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -4,6 +4,7 @@ mod block_verification; mod merge; mod op_verification; mod payload_invalidation; +mod rewards; mod store_tests; mod sync_committee_verification; mod tests; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs new file mode 100644 index 000000000..b61bea124 --- /dev/null +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -0,0 +1,121 @@ +#![cfg(test)] + +use std::collections::HashMap; + +use beacon_chain::test_utils::{ + generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, +}; +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, + types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, +}; +use lazy_static::lazy_static; + +pub const VALIDATOR_COUNT: usize = 64; + +lazy_static! { + static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_harness() -> BeaconChainHarness> { + let mut spec = E::default_spec(); + + spec.altair_fork_epoch = Some(Epoch::new(0)); // We use altair for all tests + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .keypairs(KEYPAIRS.to_vec()) + .fresh_ephemeral_store() + .build(); + + harness.advance_slot(); + + harness +} + +#[tokio::test] +async fn test_sync_committee_rewards() { + let num_block_produced = MinimalEthSpec::slots_per_epoch(); + let harness = get_harness::(); + + let latest_block_root = harness + .extend_chain( + num_block_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Create and add sync committee message to op_pool + let sync_contributions = harness.make_sync_contributions( + &harness.get_current_state(), + latest_block_root, + harness.get_current_slot(), + RelativeSyncCommittee::Current, + ); + + harness + .process_sync_contributions(sync_contributions) + .unwrap(); + + // Add block + let chain = &harness.chain; + let (head_state, head_state_root) = harness.get_current_state_and_root(); + let target_slot = harness.get_current_slot() + 1; + + let (block_root, mut state) = harness + .add_attested_block_at_slot(target_slot, head_state, head_state_root, &[]) + .await + .unwrap(); + + let block = harness.get_block(block_root).unwrap(); + let parent_block = chain + .get_blinded_block(&block.parent_root()) + .unwrap() + .unwrap(); + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .unwrap(); + + let rewards = reward_payload + .iter() + .map(|reward| (reward.validator_index, reward.reward)) + .collect::>(); + + let proposer_index = state + .get_beacon_proposer_index(target_slot, &MinimalEthSpec::default_spec()) + .unwrap(); + + let mut mismatches = vec![]; + + for validator in state.validators() { + let validator_index = state + .clone() + .get_validator_index(&validator.pubkey) + .unwrap() + .unwrap(); + let pre_state_balance = parent_state.balances()[validator_index]; + let post_state_balance = state.balances()[validator_index]; + let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); + + if validator_index == proposer_index { + continue; // Ignore proposer + } + + if pre_state_balance as i64 + 
*sync_committee_reward != post_state_balance as i64 { + mismatches.push(validator_index.to_string()); + } + } + + assert_eq!( + mismatches.len(), + 0, + "Expect 0 mismatches, but these validators have mismatches on balance: {} ", + mismatches.join(",") + ); +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8cd0b856b..1399bb99a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -16,6 +16,7 @@ mod metrics; mod proposer_duties; mod publish_blocks; mod state_id; +mod sync_committee_rewards; mod sync_committees; mod ui; mod validator_inclusion; @@ -1699,6 +1700,41 @@ pub fn serve( }, ); + /* + * beacon/rewards + */ + + let beacon_rewards_path = eth_v1 + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(chain_filter.clone()); + + // POST beacon/rewards/sync_committee/{block_id} + let post_beacon_rewards_sync_committee = beacon_rewards_path + .clone() + .and(warp::path("sync_committee")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(warp::body::json()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger| { + blocking_json_task(move || { + let (rewards, execution_optimistic) = + sync_committee_rewards::compute_sync_committee_rewards( + chain, block_id, validators, log, + )?; + + Ok(rewards) + .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) + }, + ); + /* * config */ @@ -3396,6 +3432,7 @@ pub fn serve( .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) .or(post_beacon_pool_sync_committees.boxed()) + .or(post_beacon_rewards_sync_committee.boxed()) .or(post_validator_duties_attester.boxed()) .or(post_validator_duties_sync.boxed()) .or(post_validator_aggregate_and_proofs.boxed()) diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs new file mode 100644 index 000000000..ae369115d --- /dev/null +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -0,0 +1,77 @@ +use crate::{BlockId, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::SyncCommitteeReward; +use eth2::types::ValidatorId; +use slog::{debug, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use types::{BeaconState, SignedBlindedBeaconBlock}; +use warp_utils::reject::{beacon_chain_error, custom_not_found}; + +pub fn compute_sync_committee_rewards( + chain: Arc>, + block_id: BlockId, + validators: Vec, + log: Logger, +) -> Result<(Option>, ExecutionOptimistic), warp::Rejection> { + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + let mut state = get_state_before_applying_block(chain.clone(), &block)?; + + let reward_payload = chain + .compute_sync_committee_rewards(block.message(), &mut state) + .map_err(beacon_chain_error)?; + + let data = if reward_payload.is_empty() { + debug!(log, "compute_sync_committee_rewards returned empty"); + None + } else if validators.is_empty() { + Some(reward_payload) + } else { + Some( + reward_payload + .into_iter() + .filter(|reward| { + validators.iter().any(|validator| match validator { + ValidatorId::Index(i) => reward.validator_index == *i, + ValidatorId::PublicKey(pubkey) => match state.get_validator_index(pubkey) { + Ok(Some(i)) => reward.validator_index == i as u64, + _ => false, + }, + }) + }) + .collect::>(), + ) + }; + + Ok((data, execution_optimistic)) +} + +fn 
get_state_before_applying_block( + chain: Arc>, + block: &SignedBlindedBeaconBlock, +) -> Result, warp::reject::Rejection> { + let parent_block: SignedBlindedBeaconBlock = chain + .get_blinded_block(&block.parent_root()) + .and_then(|maybe_block| { + maybe_block.ok_or_else(|| BeaconChainError::MissingBeaconBlock(block.parent_root())) + }) + .map_err(|e| custom_not_found(format!("Parent block is not available! {:?}", e)))?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .and_then(|maybe_state| { + maybe_state + .ok_or_else(|| BeaconChainError::MissingBeaconState(parent_block.state_root())) + }) + .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + + let replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + Ok(replayer.into_state()) +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 58b4c88b3..00b664446 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1026,6 +1026,24 @@ impl BeaconNodeHttpClient { .transpose() } + /// `POST beacon/rewards/sync_committee` + pub async fn post_beacon_rewards_sync_committee( + &self, + rewards: &[Option>], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("rewards") + .push("sync_committee"); + + self.post(path, &rewards).await?; + + Ok(()) + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 2dced1c44..068abd693 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -3,6 +3,7 @@ mod attestation_performance; mod block_packing_efficiency; mod block_rewards; +mod sync_committee_rewards; use crate::{ ok_or_error, @@ -27,6 +28,7 @@ pub use block_packing_efficiency::{ }; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; +pub use sync_committee_rewards::SyncCommitteeReward; // Define "legacy" implementations of `Option` which use four bytes for encoding the union // selector. 
diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs new file mode 100644 index 000000000..cdd685065 --- /dev/null +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +// Details about the rewards paid to sync committee members for attesting headers +// All rewards in GWei + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct SyncCommitteeReward { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub validator_index: u64, + // sync committee reward in gwei for the validator + pub reward: i64, +} From 3d4dd6af7511874c532ca01d2312a73f22179d5c Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 24 Jan 2023 16:22:51 +0100 Subject: [PATCH 14/25] Use eth1_withdrawal_credentials in Test States (#3898) * Use eth1_withdrawal_credential in Some Test States * Update beacon_node/genesis/src/interop.rs Co-authored-by: Michael Sproul * Update beacon_node/genesis/src/interop.rs Co-authored-by: Michael Sproul * Increase validator sizes * Pick next sync committee message Co-authored-by: Michael Sproul Co-authored-by: Paul Hauner --- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/store_tests.rs | 24 +-- .../tests/sync_committee_verification.rs | 7 +- beacon_node/beacon_chain/tests/tests.rs | 2 +- beacon_node/genesis/src/interop.rs | 157 +++++++++++++++++- beacon_node/genesis/src/lib.rs | 4 +- consensus/types/src/beacon_state/tests.rs | 4 +- 7 files changed, 175 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e71c1a987..247764080 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -22,7 +22,7 @@ use execution_layer::{ }; use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use merkle_proof::MerkleTree; use parking_lot::Mutex; @@ -191,7 +191,7 @@ impl Builder> { .unwrap(), ); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), @@ -252,7 +252,7 @@ impl Builder> { .expect("cannot build without validator keypairs"); let mutator = move |builder: BeaconChainBuilder<_>| { - let genesis_state = interop_genesis_state::( + let genesis_state = interop_genesis_state_with_eth1::( &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8a6ea9cfe..622ea7aec 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1013,8 +1013,8 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + 
ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1123,8 +1123,8 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1255,8 +1255,8 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { #[tokio::test] async fn pruning_does_not_touch_blocks_prior_to_finalization() { - const HONEST_VALIDATOR_COUNT: usize = 16; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1350,8 +1350,8 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1495,8 +1495,8 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); @@ -1624,8 +1624,8 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. 
#[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 16 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32 + 0; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 1e51b0ffb..239f55e7d 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -45,6 +45,7 @@ fn get_valid_sync_committee_message( harness: &BeaconChainHarness>, slot: Slot, relative_sync_committee: RelativeSyncCommittee, + message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; @@ -52,7 +53,7 @@ fn get_valid_sync_committee_message( .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") - .get(0) + .get(message_index) .expect("first sync message should exist") .clone(); @@ -494,7 +495,7 @@ async fn unaggregated_gossip_verification() { let current_slot = harness.chain.slot().expect("should get slot"); let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = - get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { @@ -644,7 +645,7 @@ async fn unaggregated_gossip_verification() { // **Incorrectly** create a sync message using the current sync committee let (next_valid_sync_committee_message, _, _, next_subnet_id) = - get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current); + get_valid_sync_committee_message(&harness, target_slot, RelativeSyncCommittee::Current, 1); assert_invalid!( "sync message on incorrect subnet", diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d80db132e..384fcbe5d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -19,7 +19,7 @@ use types::{ }; // Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; +pub const VALIDATOR_COUNT: usize = 48; lazy_static! { /// A cached set of keys. 
diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index d8c25baec..f24e94d1b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,6 +10,20 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; +fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) +} + +fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { + let fake_execution_address = &hash(&pubkey.as_ssz_bytes())[0..20]; + let mut credentials = [0u8; 32]; + credentials[0] = spec.eth1_address_withdrawal_prefix_byte; + credentials[12..].copy_from_slice(fake_execution_address); + Hash256::from_slice(&credentials) +} + /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: @@ -24,17 +38,67 @@ pub fn interop_genesis_state( let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) - }; - let datas = keypairs .into_par_iter() .map(|keypair| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(&keypair.pk), + withdrawal_credentials: bls_withdrawal_credentials(&keypair.pk, spec), + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty().into(), + }; + + data.signature = data.create_signature(&keypair.sk, spec); + + data + }) + .collect::>(); + + let mut state = initialize_beacon_state_from_eth1( + eth1_block_hash, + eth1_timestamp, + genesis_deposits(datas, spec)?, + execution_payload_header, + spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + *state.genesis_time_mut() = genesis_time; + + // Invalidate all the caches after all the manual state surgery. 
+ state + .drop_all_caches() + .map_err(|e| format!("Unable to drop caches: {:?}", e))?; + + Ok(state) +} + +// returns an interop genesis state except every other +// validator has eth1 withdrawal credentials +pub fn interop_genesis_state_with_eth1( + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + let eth1_timestamp = 2_u64.pow(40); + let amount = spec.max_effective_balance; + + let withdrawal_credentials = |index: usize, pubkey: &PublicKey| { + if index % 2 == 0 { + bls_withdrawal_credentials(pubkey, spec) + } else { + eth1_withdrawal_credentials(pubkey, spec) + } + }; + + let datas = keypairs + .into_par_iter() + .enumerate() + .map(|(index, keypair)| { + let mut data = DepositData { + withdrawal_credentials: withdrawal_credentials(index, &keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), @@ -133,4 +197,83 @@ mod test { "validator count should be correct" ); } + + #[test] + fn interop_state_with_eth1() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state_with_eth1::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); + + assert_eq!( + state.eth1_data().block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time(), + genesis_time, + "genesis time should be as specified" + ); + + for b in state.balances() { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for (index, v) in state.validators().iter().enumerate() { + let creds = v.withdrawal_credentials.as_bytes(); + if index % 2 == 0 { + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ); + } else { + assert_eq!( + creds[0], spec.eth1_address_withdrawal_prefix_byte, + "first byte of withdrawal creds should be eth1 prefix" + ); + assert_eq!( + creds[1..12], + [0u8; 11], + "bytes [1:12] of withdrawal creds must be zero" + ); + assert_eq!( + &creds[12..], + &hash(&v.pubkey.as_ssz_bytes())[0..20], + "rest of withdrawal creds should be first 20 bytes of pubkey hash" + ) + } + } + + assert_eq!( + state.balances().len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators().len(), + validator_count, + "validator count should be correct" + ); + } } diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1233d99fd..4d5439ac1 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -5,5 +5,7 @@ mod interop; pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; -pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +pub use interop::{ + interop_genesis_state, interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH, +}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index abca10e37..d63eaafc4 100644 --- a/consensus/types/src/beacon_state/tests.rs 
+++ b/consensus/types/src/beacon_state/tests.rs @@ -2,7 +2,7 @@ use crate::test_utils::*; use crate::test_utils::{SeedableRng, XorShiftRng}; use beacon_chain::test_utils::{ - interop_genesis_state, test_spec, BeaconChainHarness, EphemeralHarnessType, + interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ @@ -551,7 +551,7 @@ fn tree_hash_cache_linear_history_long_skip() { let spec = &test_spec::(); // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state( + let mut state: BeaconState = interop_genesis_state_with_eth1( &keypairs, 0, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), From a7351c00c0354f54981a929cc9bb1f4c31f43322 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 24 Jan 2023 22:17:50 +0000 Subject: [PATCH 15/25] light client optimistic update reprocessing (#3799) ## Issue Addressed Currently there is a race between receiving blocks and receiving light client optimistic updates (in unstable), which results in processing errors. This is a continuation of PR #3693 and seeks to progress on issue #3651 ## Proposed Changes Add the parent_root to ReprocessQueueMessage::BlockImported so we can remove blocks from queue when a block arrives that has the same parent root. We use the parent root as opposed to the block_root because the LightClientOptimisticUpdate does not contain the block_root. If light_client_optimistic_update.attested_header.canonical_root() != head_block.message().parent_root() then we queue the update. Otherwise we process immediately. ## Additional Info michaelsproul came up with this idea. The code was heavily based off of the attestation reprocessing. I have not properly tested this to see if it works as intended. --- ...t_client_optimistic_update_verification.rs | 15 ++ .../network/src/beacon_processor/mod.rs | 50 ++++- .../work_reprocessing_queue.rs | 200 +++++++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 122 ++++++++--- .../beacon_processor/worker/sync_methods.rs | 6 +- beacon_node/network/src/metrics.rs | 15 ++ 6 files changed, 371 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index ec9c90e73..20d718180 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -2,6 +2,7 @@ use crate::{ beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; +use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; @@ -36,6 +37,8 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientOptimisticUpdate from state. FailedConstructingUpdate, + /// Unknown block with parent root. + UnknownBlockParentRoot(Hash256), /// Beacon chain error occured. 
BeaconChainError(BeaconChainError), LightClientUpdateError(LightClientUpdateError), @@ -58,6 +61,7 @@ impl From for Error { #[derivative(Clone(bound = "T: BeaconChainTypes"))] pub struct VerifiedLightClientOptimisticUpdate { light_client_optimistic_update: LightClientOptimisticUpdate, + pub parent_root: Hash256, seen_timestamp: Duration, } @@ -107,6 +111,16 @@ impl VerifiedLightClientOptimisticUpdate { None => return Err(Error::SigSlotStartIsNone), } + // check if we can process the optimistic update immediately + // otherwise queue + let canonical_root = light_client_optimistic_update + .attested_header + .canonical_root(); + + if canonical_root != head_block.message().parent_root() { + return Err(Error::UnknownBlockParentRoot(canonical_root)); + } + let optimistic_update = LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; @@ -119,6 +133,7 @@ impl VerifiedLightClientOptimisticUpdate { Ok(Self { light_client_optimistic_update, + parent_root: canonical_root, seen_timestamp, }) } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 743a97a29..8118443a6 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -67,7 +67,8 @@ use types::{ SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, + QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -137,6 +138,10 @@ const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; +/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored +/// for reprocessing before we start dropping them. +const MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN: usize = 128; + /// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping /// them. const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; @@ -213,6 +218,7 @@ pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; +pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; /// A simple first-in-first-out queue with a maximum length. struct FifoQueue { @@ -694,6 +700,21 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::LightClientUpdate(QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update, + seen_timestamp, + .. + }) => Self { + drop_during_sync: true, + work: Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + }, + }, } } } @@ -733,6 +754,12 @@ pub enum Work { aggregate: Box>, seen_timestamp: Duration, }, + UnknownLightClientOptimisticUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + }, GossipAggregateBatch { packages: Vec>, }, @@ -845,6 +872,7 @@ impl Work { Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::UnknownBlockAttestation { .. 
} => UNKNOWN_BLOCK_ATTESTATION, Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, + Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, } } } @@ -979,6 +1007,8 @@ impl BeaconProcessor { // Using a FIFO queue for light client updates to maintain sequence order. let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + let mut unknown_light_client_update_queue = + FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN); // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); @@ -1346,6 +1376,9 @@ impl BeaconProcessor { Work::UnknownBlockAggregate { .. } => { unknown_block_aggregate_queue.push(work) } + Work::UnknownLightClientOptimisticUpdate { .. } => { + unknown_light_client_update_queue.push(work, work_id, &self.log) + } } } } @@ -1665,6 +1698,7 @@ impl BeaconProcessor { message_id, peer_id, *light_client_optimistic_update, + Some(work_reprocessing_tx), seen_timestamp, ) }), @@ -1787,6 +1821,20 @@ impl BeaconProcessor { seen_timestamp, ) }), + Work::UnknownLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + None, + seen_timestamp, + ) + }), }; } } diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 2aeec11c3..8c568a7ee 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -19,7 +19,7 @@ use futures::task::Poll; use futures::{Stream, StreamExt}; use lighthouse_network::{MessageId, PeerId}; use logging::TimeLatch; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; @@ -30,12 +30,16 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; +use types::{ + Attestation, EthSpec, Hash256, LightClientOptimisticUpdate, SignedAggregateAndProof, + SignedBeaconBlock, SubnetId, +}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; const GOSSIP_BLOCKS: &str = "gossip_blocks"; const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; +const LIGHT_CLIENT_UPDATES: &str = "lc_updates"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. /// This is to account for any slight drift in the system clock. @@ -44,6 +48,9 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue light client updates for re-processing. +pub const QUEUED_LIGHT_CLIENT_UPDATE_DELAY: Duration = Duration::from_secs(12); + /// For how long to queue rpc blocks before sending them back for reprocessing. 
pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); @@ -55,6 +62,9 @@ const MAXIMUM_QUEUED_BLOCKS: usize = 16; /// How many attestations we keep before new ones get dropped. const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; +/// How many light client updates we keep before new ones get dropped. +const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128; + /// Messages that the scheduler can receive. pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. @@ -62,13 +72,18 @@ pub enum ReprocessQueueMessage { /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), - /// A block that was successfully processed. We use this to handle attestations for unknown - /// blocks. - BlockImported(Hash256), + /// A block that was successfully processed. We use this to handle attestations and light client updates + /// for unknown blocks. + BlockImported { + block_root: Hash256, + parent_root: Hash256, + }, /// An unaggregated attestation that references an unknown block. UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. UnknownBlockAggregate(QueuedAggregate), + /// A light client optimistic update that references a parent root that has not been seen as a parent. + UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate), } /// Events sent by the scheduler once they are ready for re-processing. @@ -77,6 +92,7 @@ pub enum ReadyWork { RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), + LightClientUpdate(QueuedLightClientUpdate), } /// An Attestation for which the corresponding block was not seen while processing, queued for @@ -99,6 +115,16 @@ pub struct QueuedAggregate { pub seen_timestamp: Duration, } +/// A light client update for which the corresponding parent block was not seen while processing, +/// queued for later. +pub struct QueuedLightClientUpdate { + pub peer_id: PeerId, + pub message_id: MessageId, + pub light_client_optimistic_update: Box>, + pub parent_root: Hash256, + pub seen_timestamp: Duration, +} + /// A block that arrived early and has been queued for later import. pub struct QueuedGossipBlock { pub peer_id: PeerId, @@ -127,6 +153,8 @@ enum InboundEvent { ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), + /// A light client update that is ready for re-processing. + ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A `DelayQueue` returned an error. DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` @@ -147,6 +175,8 @@ struct ReprocessQueue { rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, + /// Queue to manage scheduled light client updates. + lc_updates_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. @@ -157,15 +187,23 @@ struct ReprocessQueue { queued_unaggregates: FnvHashMap, DelayKey)>, /// Attestations (aggregated and unaggregated) per root. awaiting_attestations_per_root: HashMap>, + /// Queued Light Client Updates. + queued_lc_updates: FnvHashMap, DelayKey)>, + /// Light Client Updates per parent_root. 
+ awaiting_lc_updates_per_parent_root: HashMap>, /* Aux */ /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, + next_lc_update: usize, early_block_debounce: TimeLatch, rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, + lc_update_delay_debounce: TimeLatch, } +pub type QueuedLightClientUpdateId = usize; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum QueuedAttestationId { Aggregate(usize), @@ -235,6 +273,20 @@ impl Stream for ReprocessQueue { Poll::Ready(None) | Poll::Pending => (), } + match self.lc_updates_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(lc_id))) => { + return Poll::Ready(Some(InboundEvent::ReadyLightClientUpdate( + lc_id.into_inner(), + ))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "lc_updates_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. + Poll::Ready(None) | Poll::Pending => (), + } + // Last empty the messages channel. match self.work_reprocessing_rx.poll_recv(cx) { Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))), @@ -264,14 +316,19 @@ pub fn spawn_reprocess_scheduler( gossip_block_delay_queue: DelayQueue::new(), rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), + lc_updates_delay_queue: DelayQueue::new(), queued_gossip_block_roots: HashSet::new(), + queued_lc_updates: FnvHashMap::default(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), + awaiting_lc_updates_per_parent_root: HashMap::new(), next_attestation: 0, + next_lc_update: 0, early_block_debounce: TimeLatch::default(), rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), + lc_update_delay_debounce: TimeLatch::default(), }; executor.spawn( @@ -473,9 +530,49 @@ impl ReprocessQueue { self.next_attestation += 1; } - InboundEvent::Msg(BlockImported(root)) => { + InboundEvent::Msg(UnknownLightClientOptimisticUpdate( + queued_light_client_optimistic_update, + )) => { + if self.lc_updates_delay_queue.len() >= MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES { + if self.lc_update_delay_debounce.elapsed() { + error!( + log, + "Light client updates delay queue is full"; + "queue_size" => MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES, + "msg" => "check system clock" + ); + } + // Drop the light client update. + return; + } + + let lc_id: QueuedLightClientUpdateId = self.next_lc_update; + + // Register the delay. + let delay_key = self + .lc_updates_delay_queue + .insert(lc_id, QUEUED_LIGHT_CLIENT_UPDATE_DELAY); + + // Register the light client update for the corresponding root. + self.awaiting_lc_updates_per_parent_root + .entry(queued_light_client_optimistic_update.parent_root) + .or_default() + .push(lc_id); + + // Store the light client update and its info. + self.queued_lc_updates.insert( + self.next_lc_update, + (queued_light_client_optimistic_update, delay_key), + ); + + self.next_lc_update += 1; + } + InboundEvent::Msg(BlockImported { + block_root, + parent_root, + }) => { // Unqueue the attestations we have for this root, if any. 
- if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&root) { + if let Some(queued_ids) = self.awaiting_attestations_per_root.remove(&block_root) { for id in queued_ids { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS, @@ -511,12 +608,62 @@ impl ReprocessQueue { error!( log, "Unknown queued attestation for block root"; - "block_root" => ?root, + "block_root" => ?block_root, "att_id" => ?id, ); } } } + // Unqueue the light client optimistic updates we have for this root, if any. + if let Some(queued_lc_id) = self + .awaiting_lc_updates_per_parent_root + .remove(&parent_root) + { + debug!( + log, + "Dequeuing light client optimistic updates"; + "parent_root" => %parent_root, + "count" => queued_lc_id.len(), + ); + + for lc_id in queued_lc_id { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES, + ); + if let Some((work, delay_key)) = self.queued_lc_updates.remove(&lc_id).map( + |(light_client_optimistic_update, delay_key)| { + ( + ReadyWork::LightClientUpdate(light_client_optimistic_update), + delay_key, + ) + }, + ) { + // Remove the delay + self.lc_updates_delay_queue.remove(&delay_key); + + // Send the work + match self.ready_work_tx.try_send(work) { + Ok(_) => trace!( + log, + "reprocessing light client update sent"; + ), + Err(_) => error!( + log, + "Failed to send scheduled light client update"; + ), + } + } else { + // There is a mismatch between the light client update ids registered for this + // root and the queued light client updates. This should never happen. + error!( + log, + "Unknown queued light client update for parent root"; + "parent_root" => ?parent_root, + "lc_id" => ?lc_id, + ); + } + } + } } // A block that was queued for later processing is now ready to be processed. 
InboundEvent::ReadyGossipBlock(ready_block) => { @@ -591,6 +738,38 @@ impl ReprocessQueue { } } } + InboundEvent::ReadyLightClientUpdate(queued_id) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES, + ); + + if let Some((parent_root, work)) = self.queued_lc_updates.remove(&queued_id).map( + |(queued_lc_update, _delay_key)| { + ( + queued_lc_update.parent_root, + ReadyWork::LightClientUpdate(queued_lc_update), + ) + }, + ) { + if self.ready_work_tx.try_send(work).is_err() { + error!( + log, + "Failed to send scheduled light client optimistic update"; + ); + } + + if let Some(queued_lc_updates) = self + .awaiting_lc_updates_per_parent_root + .get_mut(&parent_root) + { + if let Some(index) = + queued_lc_updates.iter().position(|&id| id == queued_id) + { + queued_lc_updates.swap_remove(index); + } + } + } + } } metrics::set_gauge_vec( @@ -608,5 +787,10 @@ impl ReprocessQueue { &[ATTESTATIONS], self.attestations_delay_queue.len() as i64, ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[LIGHT_CLIENT_UPDATES], + self.lc_updates_delay_queue.len() as i64, + ); } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index c142359f3..3601ccb19 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -28,7 +28,8 @@ use types::{ use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, + ReprocessQueueMessage, }, Worker, }; @@ -953,7 +954,10 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::BlockImported(block_root)) + .try_send(ReprocessQueueMessage::BlockImported { + block_root, + parent_root: block.message().parent_root(), + }) .is_err() { error!( @@ -1330,7 +1334,7 @@ impl Worker { LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { debug!( self.log, - "LC invalid finality update"; + "Light client invalid finality update"; "peer" => %peer_id, "error" => ?e, ); @@ -1344,7 +1348,7 @@ impl Worker { LightClientFinalityUpdateError::TooEarly => { debug!( self.log, - "LC finality update too early"; + "Light client finality update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1357,7 +1361,7 @@ impl Worker { } LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( self.log, - "LC finality update already seen"; + "Light client finality update already seen"; "peer" => %peer_id, "error" => ?e, ), @@ -1366,7 +1370,7 @@ impl Worker { | LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( self.log, - "LC error constructing finality update"; + "Light client error constructing finality update"; "peer" => %peer_id, "error" => ?e, ), @@ -1381,22 +1385,77 @@ impl Worker { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, + reprocess_tx: Option>>, seen_timestamp: Duration, ) { - match self - .chain - .verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp) - { - Ok(_verified_light_client_optimistic_update) => { + match self.chain.verify_optimistic_update_for_gossip( + light_client_optimistic_update.clone(), + 
seen_timestamp, + ) { + Ok(verified_light_client_optimistic_update) => { + debug!( + self.log, + "Light client successful optimistic update"; + "peer" => %peer_id, + "parent_root" => %verified_light_client_optimistic_update.parent_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); } Err(e) => { - metrics::register_optimistic_update_error(&e); match e { - LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + LightClientOptimisticUpdateError::UnknownBlockParentRoot(parent_root) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, + ); debug!( self.log, - "LC invalid optimistic update"; + "Optimistic update for unknown block"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + if let Some(sender) = reprocess_tx { + let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( + QueuedLightClientUpdate { + peer_id, + message_id, + light_client_optimistic_update: Box::new( + light_client_optimistic_update, + ), + parent_root, + seen_timestamp, + }, + ); + + if sender.try_send(msg).is_err() { + error!( + self.log, + "Failed to send optimistic update for re-processing"; + ) + } + } else { + debug!( + self.log, + "Not sending light client update because it had been reprocessed"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + return; + } + LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client invalid optimistic update"; "peer" => %peer_id, "error" => ?e, ); @@ -1408,9 +1467,10 @@ impl Worker { ) } LightClientOptimisticUpdateError::TooEarly => { + metrics::register_optimistic_update_error(&e); debug!( self.log, - "LC optimistic update too early"; + "Light client optimistic update too early"; "peer" => %peer_id, "error" => ?e, ); @@ -1421,21 +1481,29 @@ impl Worker { "light_client_gossip_error", ); } - LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!( - self.log, - "LC optimistic update already seen"; - "peer" => %peer_id, - "error" => ?e, - ), + LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client optimistic update already seen"; + "peer" => %peer_id, + "error" => ?e, + ) + } LightClientOptimisticUpdateError::BeaconChainError(_) | LightClientOptimisticUpdateError::LightClientUpdateError(_) | LightClientOptimisticUpdateError::SigSlotStartIsNone - | LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!( - self.log, - "LC error constructing optimistic update"; - "peer" => %peer_id, - "error" => ?e, - ), + | LightClientOptimisticUpdateError::FailedConstructingUpdate => { + metrics::register_optimistic_update_error(&e); + + debug!( + self.log, + "Light client error constructing optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ) + } } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 1ec045e97..6e6e68155 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -84,6 +84,7 @@ impl Worker { } }; let slot = block.slot(); + let 
parent_root = block.message().parent_root(); let result = self .chain .process_block( @@ -101,7 +102,10 @@ impl Worker { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); // Trigger processing for work referencing this block. - let reprocess_msg = ReprocessQueueMessage::BlockImported(hash); + let reprocess_msg = ReprocessQueueMessage::BlockImported { + block_root: hash, + parent_root, + }; if reprocess_tx.try_send(reprocess_msg).is_err() { error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) }; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index baf00720b..8dc76877a 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -370,6 +370,21 @@ lazy_static! { "Number of queued attestations where as matching block has been imported." ); + /* + * Light client update reprocessing queue metrics. + */ + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_expired_optimistic_updates", + "Number of queued light client optimistic updates which have expired before a matching block has been found." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_matched_optimistic_updates", + "Number of queued light client optimistic updates where as matching block has been imported." + ); + pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES: Result = try_create_int_counter( + "beacon_processor_reprocessing_queue_sent_optimistic_updates", + "Number of queued light client optimistic updates where as matching block has been imported." + ); } pub fn update_bandwidth_metrics(bandwidth: Arc) { From e8d1dd4e7c33de5a5a24e87a50811c5dcd43c6bc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 02:17:10 +0000 Subject: [PATCH 16/25] Fix docs for `oldest_block_slot` (#3911) ## Proposed Changes Update the docs to correct the description of `oldest_block_slot`. Credit to `laern` on Discord for noticing this. --- book/src/checkpoint-sync.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 893c545cb..47dc03b20 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -97,7 +97,7 @@ You can opt-in to reconstructing all of the historic states by providing the The database keeps track of three markers to determine the availability of historic blocks and states: -* `oldest_block_slot`: All blocks with slots less than or equal to this value are available in the +* `oldest_block_slot`: All blocks with slots greater than or equal to this value are available in the database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. The minimum value is 0, indicating that the genesis state is always available. 
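To make the corrected semantics concrete, the two markers described above can be read as simple availability predicates. The sketch below is a hypothetical illustration that only mirrors the wording of this documentation; the type and method names are invented and do not correspond to Lighthouse's actual store code.

```rust
/// Hypothetical illustration of the documented availability rules.
struct HistoricMarkers {
    oldest_block_slot: u64,
    state_lower_limit: u64,
}

impl HistoricMarkers {
    /// Blocks at or above `oldest_block_slot` are in the database; the genesis block (slot 0) always is.
    fn block_is_available(&self, slot: u64) -> bool {
        slot >= self.oldest_block_slot || slot == 0
    }

    /// States at or below `state_lower_limit` are in the database; the minimum value 0 covers the genesis state.
    fn state_is_available(&self, slot: u64) -> bool {
        slot <= self.state_lower_limit
    }
}
```
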
From 79a20e8a5fae598e0f832c8bc35756b1849cf21a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 15:46:47 +1100 Subject: [PATCH 17/25] Update sync rewards API for abstract exec payload --- beacon_node/beacon_chain/src/sync_committee_rewards.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs index 561fed1a8..2221aa1d5 100644 --- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs +++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs @@ -6,10 +6,10 @@ use slog::error; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; use std::collections::HashMap; use store::RelativeEpoch; -use types::{BeaconBlockRef, BeaconState, ExecPayload}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState}; impl BeaconChain { - pub fn compute_sync_committee_rewards>( + pub fn compute_sync_committee_rewards>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState, From e48487db01d128f50c3acf5444565b5b777aecd5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 15:47:07 +1100 Subject: [PATCH 18/25] Fix the new BLS to execution change test --- Cargo.lock | 1 + beacon_node/beacon_chain/src/test_utils.rs | 16 +--- beacon_node/genesis/src/interop.rs | 91 ++++++++++--------- beacon_node/genesis/src/lib.rs | 3 +- beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/tests/common.rs | 28 ++++-- beacon_node/http_api/tests/fork_tests.rs | 45 ++++++++- .../http_api/tests/interactive_tests.rs | 3 +- 8 files changed, 123 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d3849cf6..18426b9e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3215,6 +3215,7 @@ dependencies = [ "eth2_ssz", "execution_layer", "futures", + "genesis", "hex", "lazy_static", "lighthouse_metrics", diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 51a5b6415..c5da51899 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -172,17 +172,6 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); - // For the interop genesis state we know that the withdrawal credentials are set equal - // to the validator keypairs. Check for any manually initialised credentials. 
- assert!( - self.withdrawal_keypairs.is_empty(), - "withdrawal credentials are ignored by fresh_ephemeral_store" - ); - self.withdrawal_keypairs = validator_keypairs - .iter() - .map(|kp| Some(kp.clone())) - .collect(); - let store = Arc::new( HotColdDB::open_ephemeral( self.store_config.clone().unwrap_or_default(), @@ -321,6 +310,11 @@ where self } + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { + self.withdrawal_keypairs = withdrawal_keypairs; + self + } + pub fn default_spec(self) -> Self { self.spec_or_default(None) } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index f24e94d1b..122ca8eda 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -10,7 +10,7 @@ use types::{ pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; -fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { +pub fn bls_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) @@ -35,42 +35,18 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let eth1_timestamp = 2_u64.pow(40); - let amount = spec.max_effective_balance; - - let datas = keypairs - .into_par_iter() - .map(|keypair| { - let mut data = DepositData { - withdrawal_credentials: bls_withdrawal_credentials(&keypair.pk, spec), - pubkey: keypair.pk.clone().into(), - amount, - signature: Signature::empty().into(), - }; - - data.signature = data.create_signature(&keypair.sk, spec); - - data - }) + let withdrawal_credentials = keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) .collect::>(); - - let mut state = initialize_beacon_state_from_eth1( + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, eth1_block_hash, - eth1_timestamp, - genesis_deposits(datas, spec)?, execution_payload_header, spec, ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - *state.genesis_time_mut() = genesis_time; - - // Invalidate all the caches after all the manual state surgery. 
- state - .drop_all_caches() - .map_err(|e| format!("Unable to drop caches: {:?}", e))?; - - Ok(state) } // returns an interop genesis state except every other @@ -82,23 +58,52 @@ pub fn interop_genesis_state_with_eth1( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { + let withdrawal_credentials = keypairs + .iter() + .enumerate() + .map(|(index, keypair)| { + if index % 2 == 0 { + bls_withdrawal_credentials(&keypair.pk, spec) + } else { + eth1_withdrawal_credentials(&keypair.pk, spec) + } + }) + .collect::>(); + interop_genesis_state_with_withdrawal_credentials::( + keypairs, + &withdrawal_credentials, + genesis_time, + eth1_block_hash, + execution_payload_header, + spec, + ) +} + +pub fn interop_genesis_state_with_withdrawal_credentials( + keypairs: &[Keypair], + withdrawal_credentials: &[Hash256], + genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, + spec: &ChainSpec, +) -> Result, String> { + if keypairs.len() != withdrawal_credentials.len() { + return Err(format!( + "wrong number of withdrawal credentials, expected: {}, got: {}", + keypairs.len(), + withdrawal_credentials.len() + )); + } + let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - let withdrawal_credentials = |index: usize, pubkey: &PublicKey| { - if index % 2 == 0 { - bls_withdrawal_credentials(pubkey, spec) - } else { - eth1_withdrawal_credentials(pubkey, spec) - } - }; - let datas = keypairs .into_par_iter() - .enumerate() - .map(|(index, keypair)| { + .zip(withdrawal_credentials.into_par_iter()) + .map(|(keypair, &withdrawal_credentials)| { let mut data = DepositData { - withdrawal_credentials: withdrawal_credentials(index, &keypair.pk), + withdrawal_credentials, pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty().into(), diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 4d5439ac1..3fb053bf8 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -6,6 +6,7 @@ pub use eth1::Config as Eth1Config; pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ - interop_genesis_state, interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH, + bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, + interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, }; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 077e3aa7c..0dc918f42 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -45,6 +45,7 @@ logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } unused_port = {path = "../../common/unused_port"} +genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 7c228d980..ee0273579 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -1,5 +1,7 @@ use beacon_chain::{ - test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, + test_utils::{ + BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType, + }, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; @@ -55,25 +57,39 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Initializer = Box< + dyn 
FnOnce(HarnessBuilder>) -> HarnessBuilder>, +>; type Mutator = BoxedMutator, MemoryStore>; impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - Self::new_with_mutator(spec, validator_count, None).await + Self::new_with_initializer_and_mutator(spec, validator_count, None, None).await } - pub async fn new_with_mutator( + pub async fn new_with_initializer_and_mutator( spec: Option, validator_count: usize, + initializer: Option>, mutator: Option>, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) - .deterministic_keypairs(validator_count) .logger(test_logger()) - .mock_execution_layer() - .fresh_ephemeral_store(); + .mock_execution_layer(); + harness_builder = if let Some(initializer) = initializer { + // Apply custom initialization provided by the caller. + initializer(harness_builder) + } else { + // Apply default initial configuration. + harness_builder + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + }; + + // Add a mutator for the beacon chain builder which will be called in + // `HarnessBuilder::build`. if let Some(mutator) = mutator { harness_builder = harness_builder.initial_mutator(mutator); } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index eaaa4e864..e61470fe9 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,8 +1,15 @@ //! Tests for API behaviour across fork boundaries. use crate::common::*; -use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; +use beacon_chain::{ + test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, + StateSkipConfig, +}; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; -use types::{Address, ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; +use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use types::{ + test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, +}; type E = MinimalEthSpec; @@ -338,7 +345,39 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { let fork_epoch = Epoch::new(2); let spec = capella_spec(fork_epoch); let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); - let tester = InteractiveTester::::new(Some(spec.clone()), validator_count).await; + + // Use a genesis state with entirely BLS withdrawal credentials. + // Offset keypairs by `validator_count` to create keys distinct from the signing keys. 
+ let validator_keypairs = generate_deterministic_keypairs(validator_count); + let withdrawal_keypairs = (0..validator_count) + .map(|i| Some(generate_deterministic_keypair(i + validator_count))) + .collect::>(); + let withdrawal_credentials = withdrawal_keypairs + .iter() + .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + .collect::>(); + let genesis_state = interop_genesis_state_with_withdrawal_credentials( + &validator_keypairs, + &withdrawal_credentials, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let tester = InteractiveTester::::new_with_initializer_and_mutator( + Some(spec.clone()), + validator_count, + Some(Box::new(|harness_builder| { + harness_builder + .keypairs(validator_keypairs) + .withdrawal_keypairs(withdrawal_keypairs) + .genesis_state_ephemeral_store(genesis_state) + })), + None, + ) + .await; let harness = &tester.harness; let client = &tester.client; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 04d527d53..fdcc0d5fd 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -278,9 +278,10 @@ pub async fn proposer_boost_re_org_test( let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); - let tester = InteractiveTester::::new_with_mutator( + let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), validator_count, + None, Some(Box::new(move |builder| { builder .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) From 16bdb2771b7ca16a84f90d4fdc4ca6b37288312a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Jan 2023 16:18:00 +1100 Subject: [PATCH 19/25] Update another test broken by the shuffling change --- beacon_node/http_api/tests/interactive_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index fdcc0d5fd..7096fac42 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -545,7 +545,7 @@ pub async fn proposer_boost_re_org_test( pub async fn fork_choice_before_proposal() { // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. - let validator_count = 32; + let validator_count = 64; let all_validators = (0..validator_count).collect::>(); let num_initial: u64 = 31; From 0866b739d0cb4b7974e70cbd5b67388f36cd1361 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 27 Jan 2023 09:48:42 +0000 Subject: [PATCH 20/25] Clippy 1.67 (#3916) ## Proposed Changes Clippy 1.67.0 put us on blast for the size of some of our errors, most of them written by me ( :eyes: ). This PR shrinks the size of `BeaconChainError` by dropping some extraneous info and boxing an inner error which should only occur infrequently anyway. For the `AttestationSlashInfo` and `BlockSlashInfo` I opted to ignore the lint as they are always used in a `Result` where `A` is a similar size. This means they don't bloat the size of the `Result`, so it's a bit annoying for Clippy to report this as an issue. I also chose to ignore `clippy::uninlined-format-args` because I think the benefit-to-churn ratio is too low. E.g. 
sometimes we have long identifiers in `format!` args and IMO the non-inlined form is easier to read: ```rust // I prefer this... format!( "{} did {} to {}", REALLY_LONG_CONSTANT_NAME, ANOTHER_REALLY_LONG_CONSTANT_NAME, regular_long_identifier_name ); // To this format!("{REALLY_LONG_CONSTANT_NAME} did {ANOTHER_REALLY_LONG_CONSTANT_NAME} to {regular_long_identifier_name}"); ``` I tried generating an automatic diff with `cargo clippy --fix` but it came out at: ``` 250 files changed, 1209 insertions(+), 1469 deletions(-) ``` Which seems like a bad idea when we'd have to back-merge it to `capella` and `eip4844` :scream: --- Makefile | 3 ++- .../beacon_chain/src/attestation_verification.rs | 5 +++++ beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- beacon_node/beacon_chain/src/block_verification.rs | 5 +++++ beacon_node/beacon_chain/src/errors.rs | 4 +--- beacon_node/beacon_chain/src/fork_choice_signal.rs | 4 ++-- common/compare_fields/src/lib.rs | 10 +++------- common/compare_fields_derive/src/lib.rs | 2 +- .../src/per_block_processing/process_operations.rs | 8 ++++---- 9 files changed, 26 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index 68ada1b4b..ebad9b63f 100644 --- a/Makefile +++ b/Makefile @@ -164,7 +164,8 @@ lint: -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ - -A clippy::question-mark + -A clippy::question-mark \ + -A clippy::uninlined-format-args nightly-lint: cp .github/custom/clippy.toml . diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index b60ce7efe..04f601fad 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -27,6 +27,11 @@ //! â–¼ //! impl VerifiedAttestation //! ``` + +// Ignore this lint for `AttestationSlashInfo` which is of comparable size to the non-error types it +// is returned alongside. +#![allow(clippy::result_large_err)] + mod batch; use crate::{ diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 55d6ae29e..3366e1364 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -975,7 +975,9 @@ impl BeaconChain { .ok_or(Error::ExecutionLayerMissing)? .get_payload_by_block_hash(exec_block_hash) .await - .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? + .map_err(|e| { + Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, Box::new(e)) + })? .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; // Verify payload integrity. @@ -992,8 +994,6 @@ impl BeaconChain { return Err(Error::InconsistentPayloadReconstructed { slot: blinded_block.slot(), exec_block_hash, - canonical_payload_root: execution_payload_header.tree_hash_root(), - reconstructed_payload_root: header_from_payload.tree_hash_root(), canonical_transactions_root: execution_payload_header.transactions_root, reconstructed_transactions_root: header_from_payload.transactions_root, }); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ab317e96b..ad08bd9f4 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,11 @@ //! END //! //! ``` + +// Ignore this lint for `BlockSlashInfo` which is of comparable size to the non-error types it is +// returned alongside. 
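
As a minimal, self-contained sketch (illustrative only, not Lighthouse code) of why the commit message distinguishes the two cases above: the slash-info types are comparable in size to the `Ok` values they travel with, so they are left alone, whereas a genuinely oversized variant such as the payload-reconstruction error is boxed because an enum is as large as its largest variant and that cost is paid by every `Result` that carries it.

```rust
use std::mem::size_of;

// Stand-in for a bulky inner error payload.
struct BigInner([u8; 256]);

enum FatError {
    // Inlined: every `Result` carrying this enum reserves 256+ bytes.
    Reconstruction(BigInner),
}

enum SlimError {
    // Boxed: only a pointer is stored inline; the payload lives on the heap
    // and is only allocated on the (rare) error path.
    Reconstruction(Box<BigInner>),
}

fn main() {
    println!("fat  Result: {} bytes", size_of::<Result<u64, FatError>>());
    println!("slim Result: {} bytes", size_of::<Result<u64, SlimError>>());
    assert!(size_of::<Result<u64, SlimError>>() < size_of::<Result<u64, FatError>>());
}
```
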
+#![allow(clippy::result_large_err)] + use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 24ea07833..788369e55 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -141,13 +141,11 @@ pub enum BeaconChainError { BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), - ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), + ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, Box), BlockHashMissingFromExecutionLayer(ExecutionBlockHash), InconsistentPayloadReconstructed { slot: Slot, exec_block_hash: ExecutionBlockHash, - canonical_payload_root: Hash256, - reconstructed_payload_root: Hash256, canonical_transactions_root: Hash256, reconstructed_transactions_root: Hash256, }, diff --git a/beacon_node/beacon_chain/src/fork_choice_signal.rs b/beacon_node/beacon_chain/src/fork_choice_signal.rs index fd92de661..f5424d417 100644 --- a/beacon_node/beacon_chain/src/fork_choice_signal.rs +++ b/beacon_node/beacon_chain/src/fork_choice_signal.rs @@ -43,7 +43,7 @@ impl ForkChoiceSignalTx { /// /// Return an error if the provided `slot` is strictly less than any previously provided slot. pub fn notify_fork_choice_complete(&self, slot: Slot) -> Result<(), BeaconChainError> { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); @@ -72,7 +72,7 @@ impl Default for ForkChoiceSignalTx { impl ForkChoiceSignalRx { pub fn wait_for_fork_choice(&self, slot: Slot, timeout: Duration) -> ForkChoiceWaitResult { - let &(ref lock, ref condvar) = &*self.pair; + let (lock, condvar) = &*self.pair; let mut current_slot = lock.lock(); diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs index a0166eb50..bc2f5446a 100644 --- a/common/compare_fields/src/lib.rs +++ b/common/compare_fields/src/lib.rs @@ -115,11 +115,7 @@ impl Comparison { let mut children = vec![]; for i in 0..std::cmp::max(a.len(), b.len()) { - children.push(FieldComparison::new( - format!("{:}", i), - &a.get(i), - &b.get(i), - )); + children.push(FieldComparison::new(format!("{i}"), &a.get(i), &b.get(i))); } Self::parent(field_name, a == b, children) @@ -164,8 +160,8 @@ impl FieldComparison { Self { field_name, equal: a == b, - a: format!("{:?}", a), - b: format!("{:?}", b), + a: format!("{a:?}"), + b: format!("{b:?}"), } } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index beabc6ca9..752c09ee0 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -32,7 +32,7 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream { _ => panic!("compare_fields_derive only supports named struct fields."), }; - let field_name = format!("{:}", ident_a); + let field_name = ident_a.to_string(); let ident_b = ident_a.clone(); let quote = if is_slice(field) { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9f27c4c9a..9aa1e6d37 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,9 +9,9 @@ use 
crate::VerifySignatures; use safe_arith::SafeArith; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_operations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -232,9 +232,9 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. -pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( +pub fn process_attestations>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T, Payload>, + block_body: BeaconBlockBodyRef, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, From 7b7595347d110ebf2e6b6b04bece048ef1424fe5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:26:23 -0600 Subject: [PATCH 21/25] exchangeCapabilities & Capella Readiness Logging (#3918) * Undo Passing Spec to Engine API * Utilize engine_exchangeCapabilities * Add Logging to Indicate Capella Readiness * Add exchangeCapabilities to mock_execution_layer * Send Nested Array for engine_exchangeCapabilities * Use Mutex Instead of RwLock for EngineCapabilities * Improve Locking to Avoid Deadlock * Prettier logic for get_engine_capabilities * Improve Comments * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/beacon_chain/src/capella_readiness.rs Co-authored-by: Michael Sproul * Update beacon_node/client/src/notifier.rs Co-authored-by: Michael Sproul * Update beacon_node/execution_layer/src/engine_api/http.rs Co-authored-by: Michael Sproul * Addressed Michael's Comments --------- Co-authored-by: Michael Sproul --- .../beacon_chain/src/capella_readiness.rs | 135 +++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + .../beacon_chain/src/merge_readiness.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 2 - beacon_node/client/src/builder.rs | 1 - beacon_node/client/src/notifier.rs | 71 +++++- beacon_node/eth1/src/inner.rs | 2 +- beacon_node/eth1/src/service.rs | 17 +- beacon_node/eth1/tests/test.rs | 14 +- beacon_node/execution_layer/src/engine_api.rs | 39 +++- .../execution_layer/src/engine_api/http.rs | 215 ++++++++++++------ beacon_node/execution_layer/src/engines.rs | 61 ++++- beacon_node/execution_layer/src/lib.rs | 33 ++- .../src/test_utils/handle_rpc.rs | 168 +++++++++----- .../src/test_utils/mock_builder.rs | 3 +- .../src/test_utils/mock_execution_layer.rs | 5 +- .../execution_layer/src/test_utils/mod.rs | 20 +- .../src/test_rig.rs | 4 +- 18 files changed, 601 insertions(+), 192 deletions(-) create mode 100644 beacon_node/beacon_chain/src/capella_readiness.rs diff --git a/beacon_node/beacon_chain/src/capella_readiness.rs b/beacon_node/beacon_chain/src/capella_readiness.rs new file mode 100644 index 000000000..b15632105 --- /dev/null +++ b/beacon_node/beacon_chain/src/capella_readiness.rs @@ -0,0 +1,135 @@ +//! Provides tools for checking if a node is ready for the Capella upgrade and following merge +//! transition. 
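
To make the handshake described in the commit message above concrete: instead of guessing supported Engine API methods from the chain spec, the client now asks the execution engine which methods it serves and folds the returned method names into per-method flags. A rough, hedged sketch of that mapping (names simplified; not the exact Lighthouse types):

```rust
/// Illustrative only: turn the method-name list returned by
/// `engine_exchangeCapabilities` into boolean capability flags.
#[derive(Debug)]
struct Capabilities {
    new_payload_v2: bool,
    forkchoice_updated_v2: bool,
    get_payload_v2: bool,
}

fn capabilities_from_response(methods: &[&str]) -> Capabilities {
    Capabilities {
        new_payload_v2: methods.contains(&"engine_newPayloadV2"),
        forkchoice_updated_v2: methods.contains(&"engine_forkchoiceUpdatedV2"),
        get_payload_v2: methods.contains(&"engine_getPayloadV2"),
    }
}

fn main() {
    // Pretend the execution engine only advertised V1 methods plus getPayloadV2.
    let response = [
        "engine_newPayloadV1",
        "engine_forkchoiceUpdatedV1",
        "engine_getPayloadV2",
    ];
    let caps = capabilities_from_response(&response);
    println!("{caps:?}"); // get_payload_v2 is true; the V2 newPayload/fcu flags are false
}
```
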
+ +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V2, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Capella fork when we will start issuing warnings about preparation. +use super::merge_readiness::SECONDS_IN_A_WEEK; +pub const CAPELLA_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum CapellaReadiness { + /// The execution engine is capella-enabled (as far as we can tell) + Ready, + /// The EL can be reached and has the correct configuration, however it's not yet synced. + NotSynced, + /// We are connected to an execution engine which doesn't support the V2 engine api methods + V2MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for CapellaReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CapellaReadiness::Ready => { + write!(f, "This node appears ready for Capella.") + } + CapellaReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + CapellaReadiness::NotSynced => write!( + f, + "The execution endpoint is connected and configured, \ + however it is not yet synced" + ), + CapellaReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + CapellaReadiness::V2MethodsNotSupported { error } => write!( + f, + "The execution endpoint does not appear to support \ + the required engine api methods for Capella: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if capella epoch is set and Capella fork has occurred or will + /// occur within `CAPELLA_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_capella(&self, current_slot: Slot) -> bool { + if let Some(capella_epoch) = self.spec.capella_fork_epoch { + let capella_slot = capella_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let capella_readiness_preparation_slots = + CAPELLA_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Capella has happened or is within the preparation time. + current_slot + capella_readiness_preparation_slots > capella_slot + } else { + // The Capella fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for capella. 
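
As a quick sanity check of the preparation window used above (editor's aside, assuming mainnet's 12-second slots): two weeks of seconds divided by the slot time gives the number of slots before the fork slot at which readiness logging starts.

```rust
fn main() {
    const SECONDS_IN_A_WEEK: u64 = 604_800;
    let preparation_seconds = SECONDS_IN_A_WEEK * 2; // same window as the readiness check
    let seconds_per_slot = 12; // mainnet assumption
    let preparation_slots = preparation_seconds / seconds_per_slot;
    assert_eq!(preparation_slots, 100_800);
    // With a Capella fork slot of F, warnings begin once current_slot + 100_800 > F.
    println!("preparation window = {preparation_slots} slots (~2 weeks)");
}
```
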
+ pub async fn check_capella_readiness(&self) -> CapellaReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + CapellaReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V2); + all_good = false; + } + if !capabilities.forkchoice_updated_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V2); + all_good = false; + } + if !capabilities.new_payload_v2 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V2); + all_good = false; + } + + if all_good { + if !el.is_synced_for_notifier().await { + // The EL is not synced. + CapellaReadiness::NotSynced + } else { + CapellaReadiness::Ready + } + } else { + CapellaReadiness::V2MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + CapellaReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c17b48517..2444c144f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -11,6 +11,7 @@ mod block_times_cache; mod block_verification; pub mod builder; pub mod canonical_head; +pub mod capella_readiness; pub mod chain_config; mod early_attester_cache; mod errors; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index 4ef2102fd..c66df39ee 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -8,7 +8,7 @@ use std::fmt::Write; use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. 
-const SECONDS_IN_A_WEEK: u64 = 604800; +pub const SECONDS_IN_A_WEEK: u64 = 604800; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; #[derive(Default, Debug, Serialize, Deserialize)] diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c5da51899..875ff845a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -375,7 +375,6 @@ where .collect::>() .unwrap(); - let spec = MainnetEthSpec::default_spec(); let config = execution_layer::Config { execution_endpoints: urls, secret_files: vec![], @@ -386,7 +385,6 @@ where config, self.runtime.task_executor.clone(), self.log.clone(), - &spec, ) .unwrap(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b19b636c7..3b016ebda 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -154,7 +154,6 @@ where config, context.executor.clone(), context.log().clone(), - &spec, ) .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; Some(execution_layer) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1da7a7970..c1d830bc0 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,6 @@ use crate::metrics; use beacon_chain::{ + capella_readiness::CapellaReadiness, merge_readiness::{MergeConfig, MergeReadiness}, BeaconChain, BeaconChainTypes, ExecutionStatus, }; @@ -313,6 +314,7 @@ pub fn spawn_notifier( eth1_logging(&beacon_chain, &log); merge_readiness_logging(current_slot, &beacon_chain, &log).await; + capella_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -350,12 +352,15 @@ async fn merge_readiness_logging( } if merge_completed && !has_execution_layer { - error!( - log, - "Execution endpoint required"; - "info" => "you need an execution engine to validate blocks, see: \ - https://lighthouse-book.sigmaprime.io/merge-migration.html" - ); + if !beacon_chain.is_time_to_prepare_for_capella(current_slot) { + // logging of the EE being offline is handled in `capella_readiness_logging()` + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + } return; } @@ -419,6 +424,60 @@ async fn merge_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Capella +async fn capella_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let capella_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| payload.withdrawals_root().is_ok()); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if capella_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_capella(current_slot) + { + return; + } + + if capella_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Capella enabled execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_capella_readiness().await { + CapellaReadiness::Ready => { + info!(log, "Ready for Capella") + } + readiness @ CapellaReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for 
Capella"; + "info" => %readiness, + "hint" => "try updating Lighthouse and/or the execution layer", + ) + } + readiness => warn!( + log, + "Not ready for Capella"; + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index a44b31050..0468a02d2 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -122,7 +122,7 @@ impl SszEth1Cache { cache: self.deposit_cache.to_deposit_cache()?, last_processed_block: self.last_processed_block, }), - endpoint: endpoint_from_config(&config, &spec) + endpoint: endpoint_from_config(&config) .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, to_finalize: RwLock::new(None), // Set the remote head_block zero when creating a new instance. We only care about diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 56c2411ba..31082394b 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -363,7 +363,7 @@ impl Default for Config { } } -pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result { +pub fn endpoint_from_config(config: &Config) -> Result { match config.endpoint.clone() { Eth1Endpoint::Auth { endpoint, @@ -373,16 +373,11 @@ pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result { let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth( - endpoint, - auth, - Some(config.execution_timeout_multiplier), - spec, - ) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) + HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) + .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } Eth1Endpoint::NoAuth(endpoint) => { - HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier), spec) + HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) } } @@ -409,7 +404,7 @@ impl Service { deposit_cache: RwLock::new(DepositUpdater::new( config.deposit_contract_deploy_block, )), - endpoint: endpoint_from_config(&config, &spec)?, + endpoint: endpoint_from_config(&config)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), config: RwLock::new(config), @@ -438,7 +433,7 @@ impl Service { inner: Arc::new(Inner { block_cache: <_>::default(), deposit_cache: RwLock::new(deposit_cache), - endpoint: endpoint_from_config(&config, &spec) + endpoint: endpoint_from_config(&config) .map_err(Error::FailedToInitializeFromSnapshot)?, to_finalize: RwLock::new(None), remote_head_block: RwLock::new(None), diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index eb0d2371c..cd680478c 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -494,8 +494,7 @@ mod deposit_tree { let mut deposit_counts = vec![]; let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, spec) - .unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); // Perform deposits to the smart contract, recording it's state along the way. 
for deposit in &deposits { @@ -599,12 +598,8 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); - let client = HttpJsonRpc::new( - SensitiveUrl::parse(ð1.endpoint()).unwrap(), - None, - &MainnetEthSpec::default_spec(), - ) - .unwrap(); + let client = + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; @@ -720,8 +715,7 @@ mod fast { ) .unwrap(); let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None, &spec) - .unwrap(); + HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index afc5cffe2..da5e991b0 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,4 +1,9 @@ use crate::engines::ForkchoiceState; +use crate::http::{ + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, +}; pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; @@ -347,11 +352,8 @@ impl GetPayloadResponse { } } -// This name is work in progress, it could -// change when this method is actually proposed -// but I'm writing this as it has been described #[derive(Clone, Copy, Debug)] -pub struct SupportedApis { +pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, pub forkchoice_updated_v1: bool, @@ -360,3 +362,32 @@ pub struct SupportedApis { pub get_payload_v2: bool, pub exchange_transition_configuration_v1: bool, } + +impl EngineCapabilities { + pub fn to_response(&self) -> Vec<&str> { + let mut response = Vec::new(); + if self.new_payload_v1 { + response.push(ENGINE_NEW_PAYLOAD_V1); + } + if self.new_payload_v2 { + response.push(ENGINE_NEW_PAYLOAD_V2); + } + if self.forkchoice_updated_v1 { + response.push(ENGINE_FORKCHOICE_UPDATED_V1); + } + if self.forkchoice_updated_v2 { + response.push(ENGINE_FORKCHOICE_UPDATED_V2); + } + if self.get_payload_v1 { + response.push(ENGINE_GET_PAYLOAD_V1); + } + if self.get_payload_v2 { + response.push(ENGINE_GET_PAYLOAD_V2); + } + if self.exchange_transition_configuration_v1 { + response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1); + } + + response + } +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 60725192b..d1faab42c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,10 +7,11 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; -use tokio::sync::RwLock; +use std::collections::HashSet; +use tokio::sync::Mutex; -use std::time::Duration; -use types::{ChainSpec, EthSpec}; +use std::time::{Duration, SystemTime}; +use types::EthSpec; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -48,8 +49,37 @@ pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const 
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; +pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; +/// This code is returned by all clients when a method is not supported +/// (verified geth, nethermind, erigon, besu) +pub const METHOD_NOT_FOUND_CODE: i64 = -32601; + +pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ + ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V1, + ENGINE_GET_PAYLOAD_V2, + ENGINE_FORKCHOICE_UPDATED_V1, + ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, +]; + +/// This is necessary because a user might run a capella-enabled version of +/// lighthouse before they update to a capella-enabled execution engine. +// TODO (mark): rip this out once we are post-capella on mainnet +pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: false, + forkchoice_updated_v1: true, + forkchoice_updated_v2: false, + get_payload_v1: true, + get_payload_v2: false, + exchange_transition_configuration_v1: true, +}; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. pub mod deposit_log { @@ -526,11 +556,47 @@ pub mod deposit_methods { } } +#[derive(Clone, Debug)] +pub struct CapabilitiesCacheEntry { + engine_capabilities: EngineCapabilities, + fetch_time: SystemTime, +} + +impl CapabilitiesCacheEntry { + pub fn new(engine_capabilities: EngineCapabilities) -> Self { + Self { + engine_capabilities, + fetch_time: SystemTime::now(), + } + } + + pub fn engine_capabilities(&self) -> &EngineCapabilities { + &self.engine_capabilities + } + + pub fn age(&self) -> Duration { + // duration_since() may fail because measurements taken earlier + // are not guaranteed to always be before later measurements + // due to anomalies such as the system clock being adjusted + // either forwards or backwards + // + // In such cases, we'll just say the age is zero + SystemTime::now() + .duration_since(self.fetch_time) + .unwrap_or(Duration::ZERO) + } + + /// returns `true` if the entry's age is >= age_limit + pub fn older_than(&self, age_limit: Option) -> bool { + age_limit.map_or(false, |limit| self.age() >= limit) + } +} + pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, - pub cached_supported_apis: RwLock>, + pub engine_capabilities_cache: Mutex>, auth: Option, } @@ -538,27 +604,12 @@ impl HttpJsonRpc { pub fn new( url: SensitiveUrl, execution_timeout_multiplier: Option, - spec: &ChainSpec, ) -> Result { - // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` - // method is implemented in all execution clients: - // https://github.com/ethereum/execution-apis/issues/321 - let cached_supported_apis = RwLock::new(Some(SupportedApis { - new_payload_v1: true, - new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - forkchoice_updated_v1: true, - forkchoice_updated_v2: spec.capella_fork_epoch.is_some() - || spec.eip4844_fork_epoch.is_some(), - get_payload_v1: true, - get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - exchange_transition_configuration_v1: true, - })); - Ok(Self { client: 
Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis, + engine_capabilities_cache: Mutex::new(None), auth: None, }) } @@ -567,27 +618,12 @@ impl HttpJsonRpc { url: SensitiveUrl, auth: Auth, execution_timeout_multiplier: Option, - spec: &ChainSpec, ) -> Result { - // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities` - // method is implemented in all execution clients: - // https://github.com/ethereum/execution-apis/issues/321 - let cached_supported_apis = RwLock::new(Some(SupportedApis { - new_payload_v1: true, - new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - forkchoice_updated_v1: true, - forkchoice_updated_v2: spec.capella_fork_epoch.is_some() - || spec.eip4844_fork_epoch.is_some(), - get_payload_v1: true, - get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(), - exchange_transition_configuration_v1: true, - })); - Ok(Self { client: Client::builder().build()?, url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), - cached_supported_apis, + engine_capabilities_cache: Mutex::new(None), auth: Some(auth), }) } @@ -893,35 +929,67 @@ impl HttpJsonRpc { Ok(response) } - // TODO: This is currently a stub for the `engine_getCapabilities` - // method. This stub is unused because we set cached_supported_apis - // in the constructor based on the `spec` - // Implement this once the execution clients support it - // https://github.com/ethereum/execution-apis/issues/321 - pub async fn get_capabilities(&self) -> Result { - Ok(SupportedApis { - new_payload_v1: true, - new_payload_v2: true, - forkchoice_updated_v1: true, - forkchoice_updated_v2: true, - get_payload_v1: true, - get_payload_v2: true, - exchange_transition_configuration_v1: true, - }) + pub async fn exchange_capabilities(&self) -> Result { + let params = json!([LIGHTHOUSE_CAPABILITIES]); + + let response: Result, _> = self + .rpc_request( + ENGINE_EXCHANGE_CAPABILITIES, + params, + ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT * self.execution_timeout_multiplier, + ) + .await; + + match response { + // TODO (mark): rip this out once we are post capella on mainnet + Err(error) => match error { + Error::ServerMessage { code, message: _ } if code == METHOD_NOT_FOUND_CODE => { + Ok(PRE_CAPELLA_ENGINE_CAPABILITIES) + } + _ => Err(error), + }, + Ok(capabilities) => Ok(EngineCapabilities { + new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), + new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), + forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), + forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), + get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), + get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), + exchange_transition_configuration_v1: capabilities + .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1), + }), + } } - pub async fn set_cached_supported_apis(&self, supported_apis: Option) { - *self.cached_supported_apis.write().await = supported_apis; + pub async fn clear_exchange_capabilties_cache(&self) { + *self.engine_capabilities_cache.lock().await = None; } - pub async fn get_cached_supported_apis(&self) -> Result { - let cached_opt = *self.cached_supported_apis.read().await; - if let Some(supported_apis) = cached_opt { - Ok(supported_apis) + /// Returns the execution engine capabilities resulting from a call to + /// 
engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + let mut lock = self.engine_capabilities_cache.lock().await; + + if lock + .as_ref() + .map_or(true, |entry| entry.older_than(age_limit)) + { + let engine_capabilities = self.exchange_capabilities().await?; + *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities)); + Ok(engine_capabilities) } else { - let supported_apis = self.get_capabilities().await?; - self.set_cached_supported_apis(Some(supported_apis)).await; - Ok(supported_apis) + // here entry is guaranteed to exist so unwrap() is safe + Ok(*lock.as_ref().unwrap().engine_capabilities()) } } @@ -931,10 +999,10 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.new_payload_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.new_payload_v2 { self.new_payload_v2(execution_payload).await - } else if supported_apis.new_payload_v1 { + } else if engine_capabilities.new_payload_v1 { self.new_payload_v1(execution_payload).await } else { Err(Error::RequiredMethodUnsupported("engine_newPayload")) @@ -948,8 +1016,8 @@ impl HttpJsonRpc { fork_name: ForkName, payload_id: PayloadId, ) -> Result, Error> { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.get_payload_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.get_payload_v2 { // TODO: modify this method to return GetPayloadResponse instead // of throwing away the `block_value` and returning only the // ExecutionPayload @@ -957,7 +1025,7 @@ impl HttpJsonRpc { .get_payload_v2(fork_name, payload_id) .await? 
.execution_payload()) - } else if supported_apis.new_payload_v1 { + } else if engine_capabilities.new_payload_v1 { self.get_payload_v1(payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayload")) @@ -971,11 +1039,11 @@ impl HttpJsonRpc { forkchoice_state: ForkchoiceState, payload_attributes: Option, ) -> Result { - let supported_apis = self.get_cached_supported_apis().await?; - if supported_apis.forkchoice_updated_v2 { + let engine_capabilities = self.get_engine_capabilities(None).await?; + if engine_capabilities.forkchoice_updated_v2 { self.forkchoice_updated_v2(forkchoice_state, payload_attributes) .await - } else if supported_apis.forkchoice_updated_v1 { + } else if engine_capabilities.forkchoice_updated_v1 { self.forkchoice_updated_v1(forkchoice_state, payload_attributes) .await } else { @@ -1003,7 +1071,6 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { let server = MockServer::unit_testing(); - let spec = MainnetEthSpec::default_spec(); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); @@ -1014,13 +1081,13 @@ mod test { let echo_auth = Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( - Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None, &spec).unwrap()), - Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()), + Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()), ) } else { ( - Arc::new(HttpJsonRpc::new(rpc_url, None, &spec).unwrap()), - Arc::new(HttpJsonRpc::new(echo_url, None, &spec).unwrap()), + Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()), + Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()), ) }; diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 271cca26c..5532fbb34 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,13 +1,15 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, + PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, info, warn, Logger}; use std::future::Future; use std::sync::Arc; +use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; @@ -18,6 +20,7 @@ use types::ExecutionBlockHash; /// Since the size of each value is small (~100 bytes) a large number is used for safety. /// FIXME: check this assumption now that the key includes entire payload attributes which now includes withdrawals const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] @@ -29,6 +32,14 @@ enum EngineStateInternal { AuthFailed, } +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +enum CapabilitiesCacheAction { + #[default] + None, + Update, + Clear, +} + /// A subset of the engine state to inform other services if the engine is online or offline. 
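
The capabilities cache introduced above follows a simple age-limited pattern: serve the cached entry unless it is missing or older than the caller's `age_limit`, in which case fetch and repopulate. A stripped-down, synchronous sketch of the same idea (editor's illustration with assumed names, using a blocking `Mutex` in place of the async one):

```rust
use std::sync::Mutex;
use std::time::{Duration, SystemTime};

/// A cached value plus the time it was fetched.
struct Entry<T> {
    value: T,
    fetch_time: SystemTime,
}

impl<T> Entry<T> {
    fn older_than(&self, limit: Option<Duration>) -> bool {
        // The clock may have moved backwards; treat that as age zero.
        let age = SystemTime::now()
            .duration_since(self.fetch_time)
            .unwrap_or(Duration::ZERO);
        limit.map_or(false, |l| age >= l)
    }
}

struct Cache<T> {
    slot: Mutex<Option<Entry<T>>>,
}

impl<T: Copy> Cache<T> {
    fn get_or_refresh(&self, age_limit: Option<Duration>, fetch: impl FnOnce() -> T) -> T {
        let mut lock = self.slot.lock().unwrap();
        if lock.as_ref().map_or(true, |e| e.older_than(age_limit)) {
            // Empty or stale: hit the backend and repopulate the cache.
            let value = fetch();
            *lock = Some(Entry { value, fetch_time: SystemTime::now() });
            value
        } else {
            // Fresh enough: serve the cached copy.
            lock.as_ref().unwrap().value
        }
    }
}

fn main() {
    let cache = Cache { slot: Mutex::new(None) };
    // First call fetches; a second call within the limit is served from the cache.
    let a = cache.get_or_refresh(Some(Duration::from_secs(900)), || 42u32);
    let b = cache.get_or_refresh(Some(Duration::from_secs(900)), || 7u32);
    assert_eq!((a, b), (42, 42));
    // `Some(Duration::ZERO)` forces a refetch, mirroring the documented behaviour.
    assert_eq!(cache.get_or_refresh(Some(Duration::ZERO), || 7u32), 7);
}
```
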
#[derive(Debug, Clone, PartialEq, Eq, Copy)] pub enum EngineState { @@ -231,7 +242,7 @@ impl Engine { /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. pub async fn upcheck(&self) { - let state: EngineStateInternal = match self.api.upcheck().await { + let (state, cache_action) = match self.api.upcheck().await { Ok(()) => { let mut state = self.state.write().await; if **state != EngineStateInternal::Synced { @@ -249,12 +260,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - **state + (**state, CapabilitiesCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -265,7 +276,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - **state + (**state, CapabilitiesCacheAction::None) } Err(e) => { error!( @@ -276,10 +287,30 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - **state + // need to clear the engine capabilities cache if we detect the + // execution engine is offline as it is likely the engine is being + // updated to a newer version with new capabilities + (**state, CapabilitiesCacheAction::Clear) } }; + // do this after dropping state lock guard to avoid holding two locks at once + match cache_action { + CapabilitiesCacheAction::None => {} + CapabilitiesCacheAction::Update => { + if let Err(e) = self + .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .await + { + warn!(self.log, + "Error during exchange capabilities"; + "error" => ?e, + ) + } + } + CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + } + debug!( self.log, "Execution engine upcheck complete"; @@ -287,6 +318,22 @@ impl Engine { ); } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.api.get_engine_capabilities(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. /// /// ## Note diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 0a1a1eef3..ad72453f1 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,6 +7,7 @@ use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; +pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; @@ -265,12 +266,7 @@ pub struct ExecutionLayer { impl ExecutionLayer { /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
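
The `upcheck` change above uses a pattern worth spelling out: while the state lock is held it only records which follow-up action to take (`Update`, `Clear`, or `None`), and the capabilities cache is touched after that guard is dropped, so two locks are never held at once. A minimal, hedged sketch of that shape (names illustrative, blocking locks for brevity):

```rust
use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq)]
enum CacheAction {
    None,
    Update,
    Clear,
}

struct Service {
    state: Mutex<&'static str>,
    cache: Mutex<Option<u32>>,
}

impl Service {
    fn upcheck(&self, healthy: bool) -> CacheAction {
        // Decide on the follow-up action while holding only the state lock...
        let action = {
            let mut state = self.state.lock().unwrap();
            if healthy {
                *state = "synced";
                CacheAction::Update
            } else {
                *state = "offline";
                CacheAction::Clear
            }
        }; // ...the state guard is dropped here.

        // ...then touch the cache without the state lock held.
        match action {
            CacheAction::Update => *self.cache.lock().unwrap() = Some(1),
            CacheAction::Clear => *self.cache.lock().unwrap() = None,
            CacheAction::None => {}
        }
        action
    }
}

fn main() {
    let svc = Service { state: Mutex::new("unknown"), cache: Mutex::new(None) };
    assert_eq!(svc.upcheck(true), CacheAction::Update);
    assert_eq!(svc.upcheck(false), CacheAction::Clear);
    assert!(svc.cache.lock().unwrap().is_none());
}
```
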
- pub fn from_config( - config: Config, - executor: TaskExecutor, - log: Logger, - spec: &ChainSpec, - ) -> Result { + pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, builder_url, @@ -325,9 +321,8 @@ impl ExecutionLayer { let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = - HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier, spec) - .map_err(Error::ApiError)?; + let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier) + .map_err(Error::ApiError)?; Engine::new(api, executor.clone(), &log) }; @@ -1367,6 +1362,26 @@ impl ExecutionLayer { } } + /// Returns the execution engine capabilities resulting from a call to + /// engine_exchangeCapabilities. If the capabilities cache is not populated, + /// or if it is populated with a cached result of age >= `age_limit`, this + /// method will fetch the result from the execution engine and populate the + /// cache before returning it. Otherwise it will return a cached result from + /// a previous call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_capabilities( + &self, + age_limit: Option, + ) -> Result { + self.engine() + .request(|engine| engine.get_engine_capabilities(age_limit)) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Used during block production to determine if the merge has been triggered. /// /// ## Specification diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 1e0963649..31a8a5da1 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -6,20 +6,27 @@ use serde_json::Value as JsonValue; use std::sync::Arc; use types::{EthSpec, ForkName}; +pub const GENERIC_ERROR_CODE: i64 = -1234; +pub const BAD_PARAMS_ERROR_CODE: i64 = -32602; +pub const UNKNOWN_PAYLOAD_ERROR_CODE: i64 = -38001; +pub const FORK_REQUEST_MISMATCH_ERROR_CODE: i64 = -32000; + pub async fn handle_rpc( body: JsonValue, ctx: Arc>, -) -> Result { +) -> Result { *ctx.previous_request.lock() = Some(body.clone()); let method = body .get("method") .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid method field".to_string())?; + .ok_or_else(|| "missing/invalid method field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; let params = body .get("params") - .ok_or_else(|| "missing/invalid params field".to_string())?; + .ok_or_else(|| "missing/invalid params field".to_string()) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { ETH_SYNCING => Ok(JsonValue::Bool(false)), @@ -27,7 +34,8 @@ pub async fn handle_rpc( let tag = params .get(0) .and_then(JsonValue::as_str) - .ok_or_else(|| "missing/invalid params[0] value".to_string())?; + .ok_or_else(|| "missing/invalid params[0] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; match tag { "latest" => Ok(serde_json::to_value( @@ -36,7 +44,10 @@ pub async fn handle_rpc( .latest_execution_block(), ) .unwrap()), - other => Err(format!("The tag {} is not supported", other)), + other => Err(( + format!("The tag {} is not supported", other), + BAD_PARAMS_ERROR_CODE, + )), } } ETH_GET_BLOCK_BY_HASH => { @@ -47,7 +58,8 @@ pub 
async fn handle_rpc( .and_then(|s| { s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) - })?; + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; // If we have a static response set, just return that. if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { @@ -57,7 +69,8 @@ pub async fn handle_rpc( let full_tx = params .get(1) .and_then(JsonValue::as_bool) - .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + .ok_or_else(|| "missing/invalid params[1] value".to_string()) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; if full_tx { Ok(serde_json::to_value( ctx.execution_block_generator @@ -76,15 +89,17 @@ pub async fn handle_rpc( } ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => { let request = match method { - ENGINE_NEW_PAYLOAD_V1 => { - JsonExecutionPayload::V1(get_param::>(params, 0)?) - } + ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( + get_param::>(params, 0) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ), ENGINE_NEW_PAYLOAD_V2 => get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V2(jep)) .or_else(|_| { get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V1(jep)) - })?, + }) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, // TODO(4844) add that here.. _ => unreachable!(), }; @@ -97,20 +112,29 @@ pub async fn handle_rpc( match fork { ForkName::Merge => { if matches!(request, JsonExecutionPayload::V2(_)) { - return Err(format!( - "{} called with `ExecutionPayloadV2` before capella fork!", - method + return Err(( + format!( + "{} called with `ExecutionPayloadV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } ForkName::Capella => { if method == ENGINE_NEW_PAYLOAD_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + GENERIC_ERROR_CODE, + )); } if matches!(request, JsonExecutionPayload::V1(_)) { - return Err(format!( - "{} called with `ExecutionPayloadV1` after capella fork!", - method + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } @@ -149,14 +173,20 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => { - let request: JsonPayloadIdRequest = get_param(params, 0)?; + let request: JsonPayloadIdRequest = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); let response = ctx .execution_block_generator .write() .get_payload(&id) - .ok_or_else(|| format!("no payload for id {:?}", id))?; + .ok_or_else(|| { + ( + format!("no payload for id {:?}", id), + UNKNOWN_PAYLOAD_ERROR_CODE, + ) + })?; // validate method called correctly according to shanghai fork time if ctx @@ -166,7 +196,10 @@ pub async fn handle_rpc( == ForkName::Capella && method == ENGINE_GET_PAYLOAD_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); } // TODO(4844) add 4844 error checking here @@ -195,38 +228,42 @@ pub async fn handle_rpc( } } ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => { - let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?; + let forkchoice_state: JsonForkchoiceStateV1 = + get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { ENGINE_FORKCHOICE_UPDATED_V1 => { - let jpa1: Option = get_param(params, 1)?; + let jpa1: Option = + 
get_param(params, 1).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; jpa1.map(JsonPayloadAttributes::V1) } ENGINE_FORKCHOICE_UPDATED_V2 => { // we can't use `deny_unknown_fields` without breaking compatibility with some // clients that haven't updated to the latest engine_api spec. So instead we'll // need to deserialize based on timestamp - get_param::>(params, 1).and_then(|pa| { - pa.and_then(|pa| { - match ctx - .execution_block_generator - .read() - .get_fork_at_timestamp(*pa.timestamp()) - { - ForkName::Merge => { - get_param::>(params, 1) - .map(|opt| opt.map(JsonPayloadAttributes::V1)) - .transpose() + get_param::>(params, 1) + .and_then(|pa| { + pa.and_then(|pa| { + match ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(*pa.timestamp()) + { + ForkName::Merge => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V1)) + .transpose() + } + ForkName::Capella => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V2)) + .transpose() + } + _ => unreachable!(), } - ForkName::Capella => { - get_param::>(params, 1) - .map(|opt| opt.map(JsonPayloadAttributes::V2)) - .transpose() - } - _ => unreachable!(), - } + }) + .transpose() }) - .transpose() - })? + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } _ => unreachable!(), }; @@ -240,20 +277,29 @@ pub async fn handle_rpc( { ForkName::Merge => { if matches!(pa, JsonPayloadAttributes::V2(_)) { - return Err(format!( - "{} called with `JsonPayloadAttributesV2` before capella fork!", - method + return Err(( + format!( + "{} called with `JsonPayloadAttributesV2` before Capella fork!", + method + ), + GENERIC_ERROR_CODE, )); } } ForkName::Capella => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { - return Err(format!("{} called after capella fork!", method)); + return Err(( + format!("{} called after Capella fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); } if matches!(pa, JsonPayloadAttributes::V1(_)) { - return Err(format!( - "{} called with `JsonPayloadAttributesV1` after capella fork!", - method + return Err(( + format!( + "{} called with `JsonPayloadAttributesV1` after Capella fork!", + method + ), + FORK_REQUEST_MISMATCH_ERROR_CODE, )); } } @@ -281,10 +327,14 @@ pub async fn handle_rpc( return Ok(serde_json::to_value(response).unwrap()); } - let mut response = ctx.execution_block_generator.write().forkchoice_updated( - forkchoice_state.into(), - payload_attributes.map(|json| json.into()), - )?; + let mut response = ctx + .execution_block_generator + .write() + .forkchoice_updated( + forkchoice_state.into(), + payload_attributes.map(|json| json.into()), + ) + .map_err(|s| (s, GENERIC_ERROR_CODE))?; if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { if status.status == PayloadStatusV1Status::Valid { @@ -305,9 +355,13 @@ pub async fn handle_rpc( }; Ok(serde_json::to_value(transition_config).unwrap()) } - other => Err(format!( - "The method {} does not exist/is not available", - other + ENGINE_EXCHANGE_CAPABILITIES => { + let engine_capabilities = ctx.engine_capabilities.read(); + Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) + } + other => Err(( + format!("The method {} does not exist/is not available", other), + METHOD_NOT_FOUND_CODE, )), } } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 8ce4a6556..06b5e81eb 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs 
@@ -84,8 +84,7 @@ impl TestingBuilder { }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) - .unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); // This should probably be done for all fields, we only update ones we are testing with so far. let mut context = Context::for_mainnet(); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index d061f13a6..ad73b2b4e 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -73,8 +73,7 @@ impl MockExecutionLayer { ..Default::default() }; let el = - ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec) - .unwrap(); + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); Self { server, @@ -106,7 +105,7 @@ impl MockExecutionLayer { prev_randao, Address::repeat_byte(42), // FIXME: think about how to handle different forks / withdrawals here.. - Some(vec![]), + None, ); // Insert a proposer to ensure the fork choice updated command works. diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index bad02e369..adf9358f0 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; +use crate::EngineCapabilities; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; @@ -31,6 +32,15 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { + new_payload_v1: true, + new_payload_v2: true, + forkchoice_updated_v1: true, + forkchoice_updated_v2: true, + get_payload_v1: true, + get_payload_v2: true, + exchange_transition_configuration_v1: true, +}; mod execution_block_generator; mod handle_rpc; @@ -117,6 +127,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -147,6 +158,10 @@ impl MockServer { } } + pub fn set_engine_capabilities(&self, engine_capabilities: EngineCapabilities) { + *self.ctx.engine_capabilities.write() = engine_capabilities; + } + pub fn new( handle: &runtime::Handle, jwt_key: JwtKey, @@ -469,6 +484,7 @@ pub struct Context { pub new_payload_statuses: Arc>>, pub fcu_payload_statuses: Arc>>, + pub engine_capabilities: Arc>, pub _phantom: PhantomData, } @@ -620,11 +636,11 @@ pub fn serve( "jsonrpc": JSONRPC_VERSION, "result": result }), - Err(message) => json!({ + Err((message, code)) => json!({ "id": id, "jsonrpc": JSONRPC_VERSION, "error": { - "code": -1234, // Junk error code. 
+ "code": code, "message": message } }), diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 2daacb0ad..fe7e51e92 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -127,7 +127,7 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor.clone(), log.clone(), &spec).unwrap(); + ExecutionLayer::from_config(config, executor.clone(), log.clone()).unwrap(); ExecutionPair { execution_engine, execution_layer, @@ -146,7 +146,7 @@ impl TestRig { ..Default::default() }; let execution_layer = - ExecutionLayer::from_config(config, executor, log.clone(), &spec).unwrap(); + ExecutionLayer::from_config(config, executor, log.clone()).unwrap(); ExecutionPair { execution_engine, execution_layer, From 90b6ae62e630c1b39e7d7f0595b0ec8027c0291c Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Wed, 1 Feb 2023 19:37:46 -0600 Subject: [PATCH 22/25] Use Local Payload if More Profitable than Builder (#3934) * Use Local Payload if More Profitable than Builder * Rename clone -> clone_from_ref * Minimize Clones of GetPayloadResponse * Cleanup & Fix Tests * Added Tests for Payload Choice by Profit * Fix Outdated Comments --- beacon_node/beacon_chain/src/test_utils.rs | 8 +- beacon_node/execution_layer/src/engine_api.rs | 55 +++-- .../execution_layer/src/engine_api/http.rs | 23 +-- beacon_node/execution_layer/src/lib.rs | 112 ++++++++--- .../src/test_utils/handle_rpc.rs | 5 +- .../src/test_utils/mock_builder.rs | 4 +- .../src/test_utils/mock_execution_layer.rs | 4 +- .../execution_layer/src/test_utils/mod.rs | 2 + beacon_node/http_api/tests/tests.rs | 190 ++++++++++++++++-- consensus/types/src/execution_payload.rs | 10 + 10 files changed, 344 insertions(+), 69 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 875ff845a..daba7115e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -426,6 +426,7 @@ where DEFAULT_TERMINAL_BLOCK, shanghai_time, eip4844_time, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -435,7 +436,11 @@ where self } - pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + pub fn mock_execution_layer_with_builder( + mut self, + beacon_url: SensitiveUrl, + builder_threshold: Option, + ) -> Self { // Get a random unused port let port = unused_port::unused_tcp_port().unwrap(); let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); @@ -452,6 +457,7 @@ where DEFAULT_TERMINAL_BLOCK, shanghai_time, eip4844_time, + builder_threshold, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), Some(builder_url.clone()), diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index da5e991b0..9918b679c 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -14,8 +14,8 @@ use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ - Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, - ForkName, Hash256, Uint256, VariableList, Withdrawal, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, + ExecutionPayloadRef, 
FixedVector, ForkName, Hash256, Uint256, VariableList, Withdrawal, }; use types::{ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge}; @@ -322,6 +322,8 @@ pub struct ProposeBlindedBlockResponse { #[superstruct( variants(Merge, Capella, Eip4844), variant_attributes(derive(Clone, Debug, PartialEq),), + map_into(ExecutionPayload), + map_ref_into(ExecutionPayloadRef), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] @@ -336,22 +338,47 @@ pub struct GetPayloadResponse { pub block_value: Uint256, } -impl GetPayloadResponse { - pub fn execution_payload(self) -> ExecutionPayload { - match self { - GetPayloadResponse::Merge(response) => { - ExecutionPayload::Merge(response.execution_payload) - } - GetPayloadResponse::Capella(response) => { - ExecutionPayload::Capella(response.execution_payload) - } - GetPayloadResponse::Eip4844(response) => { - ExecutionPayload::Eip4844(response.execution_payload) - } +impl<'a, T: EthSpec> From> for ExecutionPayloadRef<'a, T> { + fn from(response: GetPayloadResponseRef<'a, T>) -> Self { + map_get_payload_response_ref_into_execution_payload_ref!(&'a _, response, |inner, cons| { + cons(&inner.execution_payload) + }) + } +} + +impl From> for ExecutionPayload { + fn from(response: GetPayloadResponse) -> Self { + map_get_payload_response_into_execution_payload!(response, |inner, cons| { + cons(inner.execution_payload) + }) + } +} + +impl From> for (ExecutionPayload, Uint256) { + fn from(response: GetPayloadResponse) -> Self { + match response { + GetPayloadResponse::Merge(inner) => ( + ExecutionPayload::Merge(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Capella(inner) => ( + ExecutionPayload::Capella(inner.execution_payload), + inner.block_value, + ), + GetPayloadResponse::Eip4844(inner) => ( + ExecutionPayload::Eip4844(inner.execution_payload), + inner.block_value, + ), } } } +impl GetPayloadResponse { + pub fn execution_payload_ref(&self) -> ExecutionPayloadRef { + self.to_ref().into() + } +} + #[derive(Clone, Copy, Debug)] pub struct EngineCapabilities { pub new_payload_v1: bool, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d1faab42c..3871ca27a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -804,7 +804,7 @@ impl HttpJsonRpc { pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let params = json!([JsonPayloadIdRequest::from(payload_id)]); let payload_v1: JsonExecutionPayloadV1 = self @@ -815,7 +815,11 @@ impl HttpJsonRpc { ) .await?; - Ok(JsonExecutionPayload::V1(payload_v1).into()) + Ok(GetPayloadResponse::Merge(GetPayloadResponseMerge { + execution_payload: payload_v1.into(), + // Have to guess zero here as we don't know the value + block_value: Uint256::zero(), + })) } pub async fn get_payload_v2( @@ -1015,16 +1019,10 @@ impl HttpJsonRpc { &self, fork_name: ForkName, payload_id: PayloadId, - ) -> Result, Error> { + ) -> Result, Error> { let engine_capabilities = self.get_engine_capabilities(None).await?; if engine_capabilities.get_payload_v2 { - // TODO: modify this method to return GetPayloadResponse instead - // of throwing away the `block_value` and returning only the - // ExecutionPayload - Ok(self - .get_payload_v2(fork_name, payload_id) - .await? 
- .execution_payload()) + self.get_payload_v2(fork_name, payload_id).await } else if engine_capabilities.new_payload_v1 { self.get_payload_v1(payload_id).await } else { @@ -1675,10 +1673,11 @@ mod test { } })], |client| async move { - let payload = client + let payload: ExecutionPayload<_> = client .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) .await - .unwrap(); + .unwrap() + .into(); let expected = ExecutionPayload::Merge(ExecutionPayloadMerge { parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ad72453f1..8f206886e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -119,9 +119,13 @@ impl From for Error { } pub enum BlockProposalContents> { - Payload(Payload), + Payload { + payload: Payload, + block_value: Uint256, + }, PayloadAndBlobs { payload: Payload, + block_value: Uint256, kzg_commitments: Vec, blobs: Vec>, }, @@ -130,9 +134,13 @@ pub enum BlockProposalContents> { impl> BlockProposalContents { pub fn payload(&self) -> &Payload { match self { - Self::Payload(payload) => payload, + Self::Payload { + payload, + block_value: _, + } => payload, Self::PayloadAndBlobs { payload, + block_value: _, kzg_commitments: _, blobs: _, } => payload, @@ -140,9 +148,13 @@ impl> BlockProposalContents Payload { match self { - Self::Payload(payload) => payload, + Self::Payload { + payload, + block_value: _, + } => payload, Self::PayloadAndBlobs { payload, + block_value: _, kzg_commitments: _, blobs: _, } => payload, @@ -150,9 +162,13 @@ impl> BlockProposalContents Option<&[KzgCommitment]> { match self { - Self::Payload(_) => None, + Self::Payload { + payload: _, + block_value: _, + } => None, Self::PayloadAndBlobs { payload: _, + block_value: _, kzg_commitments, blobs: _, } => Some(kzg_commitments), @@ -160,21 +176,43 @@ impl> BlockProposalContents Option<&[Blob]> { match self { - Self::Payload(_) => None, + Self::Payload { + payload: _, + block_value: _, + } => None, Self::PayloadAndBlobs { payload: _, + block_value: _, kzg_commitments: _, blobs, } => Some(blobs), } } + pub fn block_value(&self) -> &Uint256 { + match self { + Self::Payload { + payload: _, + block_value, + } => block_value, + Self::PayloadAndBlobs { + payload: _, + block_value, + kzg_commitments: _, + blobs: _, + } => block_value, + } + } pub fn default_at_fork(fork_name: ForkName) -> Result { Ok(match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?) + BlockProposalContents::Payload { + payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), + } } ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs { payload: Payload::default_at_fork(fork_name)?, + block_value: Uint256::zero(), blobs: vec![], kzg_commitments: vec![], }, @@ -366,12 +404,12 @@ impl ExecutionLayer { &self.inner.builder } - /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. - fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { - self.inner.payload_cache.put(payload.clone()) + /// Cache a full payload, keyed on the `tree_hash_root` of the payload + fn cache_payload(&self, payload: ExecutionPayloadRef) -> Option> { + self.inner.payload_cache.put(payload.clone_from_ref()) } - /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. 
+ /// Attempt to retrieve a full payload from the payload cache by the payload root pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { self.inner.payload_cache.pop(root) } @@ -808,6 +846,18 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); + let relay_value = relay.data.message.value; + let local_value = *local.block_value(); + if local_value >= relay_value { + info!( + self.log(), + "Local block is more profitable than relay block"; + "local_block_value" => %local_value, + "relay_value" => %relay_value + ); + return Ok(ProvenancedPayload::Local(local)); + } + match verify_builder_bid( &relay, parent_hash, @@ -818,7 +868,10 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), Err(reason) if !reason.payload_invalid() => { info!( @@ -869,12 +922,18 @@ impl ExecutionLayer { spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), // If the payload is valid then use it. The local EE failed // to produce a payload so we have no alternative. Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder( - BlockProposalContents::Payload(relay.data.message.header), + BlockProposalContents::Payload { + payload: relay.data.message.header, + block_value: relay.data.message.value, + }, )), Err(reason) => { metrics::inc_counter_vec( @@ -988,7 +1047,7 @@ impl ExecutionLayer { payload_attributes: &PayloadAttributes, forkchoice_update_params: ForkchoiceUpdateParameters, current_fork: ForkName, - f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, + f: fn(&ExecutionLayer, ExecutionPayloadRef) -> Option>, ) -> Result, Error> { self.engine() .request(move |engine| async move { @@ -1071,9 +1130,9 @@ impl ExecutionLayer { ); engine.api.get_payload::(current_fork, payload_id).await }; - let (blob, payload) = tokio::join!(blob_fut, payload_fut); - let payload = payload.map(|full_payload| { - if full_payload.fee_recipient() != payload_attributes.suggested_fee_recipient() { + let (blob, payload_response) = tokio::join!(blob_fut, payload_fut); + let (execution_payload, block_value) = payload_response.map(|payload_response| { + if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() { error!( self.log(), "Inconsistent fee recipient"; @@ -1082,28 +1141,32 @@ impl ExecutionLayer { indicate that fees are being diverted to another address. Please \ ensure that the value of suggested_fee_recipient is set correctly and \ that the Execution Engine is trusted.", - "fee_recipient" => ?full_payload.fee_recipient(), + "fee_recipient" => ?payload_response.execution_payload_ref().fee_recipient(), "suggested_fee_recipient" => ?payload_attributes.suggested_fee_recipient(), ); } - if f(self, &full_payload).is_some() { + if f(self, payload_response.execution_payload_ref()).is_some() { warn!( self.log(), "Duplicate payload cached, this might indicate redundant proposal \ attempts." ); } - full_payload.into() + payload_response.into() })?; if let Some(blob) = blob.transpose()? 
{ // FIXME(sean) cache blobs Ok(BlockProposalContents::PayloadAndBlobs { - payload, + payload: execution_payload.into(), + block_value, blobs: blob.blobs, kzg_commitments: blob.kzgs, }) } else { - Ok(BlockProposalContents::Payload(payload)) + Ok(BlockProposalContents::Payload { + payload: execution_payload.into(), + block_value, + }) } }) .await @@ -2089,7 +2152,10 @@ mod test { } } -fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { +fn noop( + _: &ExecutionLayer, + _: ExecutionPayloadRef, +) -> Option> { None } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 31a8a5da1..138c8f6bc 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,6 +1,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; +use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; use std::sync::Arc; @@ -211,14 +212,14 @@ pub async fn handle_rpc( JsonExecutionPayload::V1(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV1 { execution_payload, - block_value: 0.into(), + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), }) .unwrap() } JsonExecutionPayload::V2(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV2 { execution_payload, - block_value: 0.into(), + block_value: DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI.into(), }) .unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 06b5e81eb..40a0c41af 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,4 +1,4 @@ -use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; @@ -328,7 +328,7 @@ impl mev_build_rs::BlindedBlockProvider for MockBuilder { let mut message = BuilderBid { header, - value: ssz_rs::U256::default(), + value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, public_key: self.builder_sk.public_key(), }; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index ad73b2b4e..1a5d1fd19 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -29,6 +29,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, None, None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -41,6 +42,7 @@ impl MockExecutionLayer { terminal_block: u64, shanghai_time: Option, eip4844_time: Option, + builder_threshold: Option, jwt_key: Option, spec: ChainSpec, builder_url: Option, @@ -69,7 +71,7 @@ impl MockExecutionLayer { builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), - builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, + builder_profit_threshold: builder_threshold.unwrap_or(DEFAULT_BUILDER_THRESHOLD_WEI), ..Default::default() }; let el = diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index adf9358f0..077d29575 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -32,6 +32,8 @@ pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; +pub const DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI: u128 = 10_000_000_000_000_000; +pub const DEFAULT_BUILDER_PAYLOAD_VALUE_WEI: u128 = 20_000_000_000_000_000; pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: true, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 86733cf63..43099c7a9 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,9 +11,11 @@ use eth2::{ types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::Operation; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; +use execution_layer::test_utils::{ + Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, +}; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; @@ -72,38 +74,53 @@ struct ApiTester { mock_builder: Option>>, } +struct ApiTesterConfig { + spec: ChainSpec, + builder_threshold: Option, +} + +impl Default for ApiTesterConfig { + fn default() -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + Self { + spec, + builder_threshold: None, + } + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; - Self::new_from_spec(spec).await + Self::new_from_config(ApiTesterConfig::default()).await } pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { - let mut spec = E::default_spec(); - spec.shard_committee_period = 2; + let mut config = ApiTesterConfig::default(); // Set whether the chain has undergone each hard fork. 
if altair { - spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); } if bellatrix { - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); } - Self::new_from_spec(spec).await + Self::new_from_config(config).await } - pub async fn new_from_spec(spec: ChainSpec) -> Self { + pub async fn new_from_config(config: ApiTesterConfig) -> Self { // Get a random unused port + let spec = config.spec; let port = unused_port::unused_tcp_port().unwrap(); let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer_with_builder(beacon_url.clone()) + .mock_execution_layer_with_builder(beacon_url.clone(), config.builder_threshold) .build(), ); @@ -358,6 +375,28 @@ impl ApiTester { tester } + pub async fn new_mev_tester_no_builder_threshold() -> Self { + let mut config = ApiTesterConfig { + builder_threshold: Some(0), + spec: E::default_spec(), + }; + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + let tester = Self::new_from_config(config) + .await + .test_post_validator_register_validator() + .await; + tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, + ))); + tester + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -3278,6 +3317,117 @@ impl ApiTester { self } + pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The builder's payload should've been chosen, so this cache should not be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // The local payload should've been chosen, so this cache should be populated + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -3747,9 +3897,9 @@ async fn get_events() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_events_altair() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - ApiTester::new_from_spec(spec) + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) .await .test_get_events_altair() .await; @@ -4262,6 +4412,18 @@ async fn builder_inadequate_builder_threshold() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit() { + ApiTester::new_mev_tester_no_builder_threshold() + .await + .test_builder_payload_chosen_when_more_profitable() + .await + .test_local_payload_chosen_when_equally_profitable() + .await + .test_local_payload_chosen_when_more_profitable() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index a57d41141..1721960f8 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -87,6 +87,16 @@ pub struct ExecutionPayload { pub withdrawals: Withdrawals, } +impl<'a, T: EthSpec> ExecutionPayloadRef<'a, T> { + // this emulates clone on a normal reference type + pub fn clone_from_ref(&self) -> ExecutionPayload { + map_execution_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.clone().into() + }) + } +} + impl ExecutionPayload { pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { match fork_name { From 5b398b19905565c4d16da8c4ed02ace5f1284bf7 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 5 Feb 2023 20:09:13 -0600 Subject: [PATCH 23/25] Don't Reject all Builder Bids After Capella (#3940) * Fix bug in Builder API Post-Capella * Fix Clippy Complaints --- beacon_node/execution_layer/src/lib.rs | 41 ++++++++++++++++---------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 8f206886e..752fc8f68 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -861,10 +861,10 @@ impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - payload_attributes.prev_randao(), - payload_attributes.timestamp(), + payload_attributes, Some(local.payload().block_number()), self.inner.builder_profit_threshold, + current_fork, spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( @@ -915,10 +915,10 @@ 
impl ExecutionLayer { match verify_builder_bid( &relay, parent_hash, - payload_attributes.prev_randao(), - payload_attributes.timestamp(), + payload_attributes, None, self.inner.builder_profit_threshold, + current_fork, spec, ) { Ok(()) => Ok(ProvenancedPayload::Builder( @@ -1875,6 +1875,11 @@ enum InvalidBuilderPayload { signature: Signature, pubkey: PublicKeyBytes, }, + #[allow(dead_code)] + WithdrawalsRoot { + payload: Hash256, + expected: Hash256, + }, } impl InvalidBuilderPayload { @@ -1889,6 +1894,7 @@ impl InvalidBuilderPayload { InvalidBuilderPayload::BlockNumber { .. } => true, InvalidBuilderPayload::Fork { .. } => true, InvalidBuilderPayload::Signature { .. } => true, + InvalidBuilderPayload::WithdrawalsRoot { .. } => true, } } } @@ -1924,6 +1930,13 @@ impl fmt::Display for InvalidBuilderPayload { "invalid payload signature {} for pubkey {}", signature, pubkey ), + InvalidBuilderPayload::WithdrawalsRoot { payload, expected } => { + write!( + f, + "payload withdrawals root was {} not {}", + payload, expected + ) + } } } } @@ -1932,10 +1945,10 @@ impl fmt::Display for InvalidBuilderPayload { fn verify_builder_bid>( bid: &ForkVersionedResponse>, parent_hash: ExecutionBlockHash, - prev_randao: Hash256, - timestamp: u64, + payload_attributes: &PayloadAttributes, block_number: Option, profit_threshold: Uint256, + current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { let is_signature_valid = bid.data.verify_signature(spec); @@ -1962,29 +1975,25 @@ fn verify_builder_bid>( payload: header.parent_hash(), expected: parent_hash, })) - } else if header.prev_randao() != prev_randao { + } else if header.prev_randao() != payload_attributes.prev_randao() { Err(Box::new(InvalidBuilderPayload::PrevRandao { payload: header.prev_randao(), - expected: prev_randao, + expected: payload_attributes.prev_randao(), })) - } else if header.timestamp() != timestamp { + } else if header.timestamp() != payload_attributes.timestamp() { Err(Box::new(InvalidBuilderPayload::Timestamp { payload: header.timestamp(), - expected: timestamp, + expected: payload_attributes.timestamp(), })) } else if block_number.map_or(false, |n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, })) - } else if !matches!(bid.version, Some(ForkName::Merge)) { - // Once fork information is added to the payload, we will need to - // check that the local and relay payloads match. At this point, if - // we are requesting a payload at all, we have to assume this is - // the Bellatrix fork. 
+ } else if bid.version != Some(current_fork) { Err(Box::new(InvalidBuilderPayload::Fork { payload: bid.version, - expected: ForkName::Merge, + expected: current_fork, })) } else if !is_signature_valid { Err(Box::new(InvalidBuilderPayload::Signature { From 2073518f0ff03a86a6fc948bf1ff4deda24cf9c5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 7 Feb 2023 11:23:36 +1100 Subject: [PATCH 24/25] Remove unused `u256_hex_be_opt` (#3942) --- consensus/serde_utils/src/lib.rs | 1 - consensus/serde_utils/src/u256_hex_be_opt.rs | 169 ------------------- 2 files changed, 170 deletions(-) delete mode 100644 consensus/serde_utils/src/u256_hex_be_opt.rs diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 75fd6009b..92b5966c9 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -7,7 +7,6 @@ pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u256_hex_be; -pub mod u256_hex_be_opt; pub mod u32_hex; pub mod u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be_opt.rs b/consensus/serde_utils/src/u256_hex_be_opt.rs deleted file mode 100644 index 8eadbf024..000000000 --- a/consensus/serde_utils/src/u256_hex_be_opt.rs +++ /dev/null @@ -1,169 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &Option, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - Some( - U256::from_str(&decoded) - .map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))), - ) - .transpose() -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: Option, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(0.into()) - }) - .unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(1.into()) - }) - .unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(256.into()) - }) - .unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(65.into()) - }) - .unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(1024.into()) - }) - .unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(U256::max_value() - 1) - }) - .unwrap(), - 
"\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: Some(U256::max_value()) - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { - val: Some(0.into()) - }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { - val: Some(65.into()) - }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { - val: Some(1024.into()) - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: Some(U256::max_value() - 1) - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: Some(U256::max_value()) - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} From e062a7cf768d2c492e2efa13febc14fe9ec66081 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 7 Feb 2023 17:13:49 +1100 Subject: [PATCH 25/25] Broadcast address changes at Capella (#3919) * Add first efforts at broadcast * Tidy * Move broadcast code to client * Progress with broadcast impl * Rename to address change * Fix compile errors * Use `while` loop * Tidy * Flip broadcast condition * Switch to forgetting individual indices * Always broadcast when the node starts * Refactor into two functions * Add testing * Add another test * Tidy, add more testing * Tidy * Add test, rename enum * Rename enum again * Tidy * Break loop early * Add V15 schema migration * Bump schema version * Progress with migration * Update beacon_node/client/src/address_change_broadcast.rs Co-authored-by: Michael Sproul * Fix typo in function name --------- Co-authored-by: Michael Sproul --- Cargo.lock | 6 + beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- beacon_node/beacon_chain/src/schema_change.rs | 9 + .../src/schema_change/migration_schema_v15.rs | 78 +++++ beacon_node/beacon_chain/src/test_utils.rs | 15 + beacon_node/client/Cargo.toml | 4 + .../client/src/address_change_broadcast.rs | 322 ++++++++++++++++++ beacon_node/client/src/builder.rs | 20 ++ beacon_node/client/src/lib.rs | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 13 +- beacon_node/http_api/tests/fork_tests.rs | 17 + beacon_node/network/Cargo.toml | 1 + .../beacon_processor/worker/gossip_methods.rs | 8 +- beacon_node/operation_pool/Cargo.toml | 1 + .../src/bls_to_execution_changes.rs | 52 ++- beacon_node/operation_pool/src/lib.rs | 44 ++- beacon_node/operation_pool/src/persistence.rs | 88 +++-- beacon_node/store/src/metadata.rs | 2 +- .../types/src/bls_to_execution_change.rs | 1 + .../src/signed_bls_to_execution_change.rs | 1 + 21 files changed, 649 insertions(+), 40 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs create mode 100644 beacon_node/client/src/address_change_broadcast.rs diff --git a/Cargo.lock b/Cargo.lock index 18426b9e5..5aa7a3923 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1062,8 +1062,10 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", + "logging", "monitoring_api", "network", + "operation_pool", "parking_lot 0.12.1", "sensitive_url", "serde", 
@@ -1073,6 +1075,7 @@ dependencies = [ "slasher_service", "slog", "slot_clock", + "state_processing", "store", "task_executor", "time 0.3.17", @@ -3224,6 +3227,7 @@ dependencies = [ "logging", "lru 0.7.8", "network", + "operation_pool", "parking_lot 0.12.1", "proto_array", "safe_arith", @@ -5007,6 +5011,7 @@ dependencies = [ "lru_cache", "matches", "num_cpus", + "operation_pool", "rand 0.8.5", "rlp", "slog", @@ -5342,6 +5347,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", + "rand 0.8.5", "rayon", "serde", "serde_derive", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bf48c32a6..2aae03b72 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -70,7 +70,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; +use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; use parking_lot::{Mutex, RwLock}; use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -2289,10 +2289,11 @@ impl BeaconChain { pub fn import_bls_to_execution_change( &self, bls_to_execution_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, ) -> bool { if self.eth1_chain.is_some() { self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change) + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) } else { false } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 8684bafe2..35202a3c5 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v12; mod migration_schema_v13; mod migration_schema_v14; +mod migration_schema_v15; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -123,6 +124,14 @@ pub fn migrate_schema( let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(14), SchemaVersion(15)) => { + let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(15), SchemaVersion(14)) => { + let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs new file mode 100644 index 000000000..f4adc2cf4 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -0,0 +1,78 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V14 op pool and transform it to V15. 
+ let PersistedOperationPoolV14:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool_v14 + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + // Initialize with empty set + capella_bls_change_broadcast_indices: <_>::default(), + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v15( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V15 op pool and transform it to V14. + let PersistedOperationPoolV15 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + } = if let Some(PersistedOperationPool::::V15(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? + { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Forgetting address changes for Capella broadcast"; + "count" => capella_bls_change_broadcast_indices.len(), + ); + + let v14 = PersistedOperationPoolV14 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + }; + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index daba7115e..f1b9bc83c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -310,6 +310,21 @@ where self } + /// Initializes the BLS withdrawal keypairs for `num_keypairs` validators to + /// the "determistic" values, regardless of wether or not the validator has + /// a BLS or execution address in the genesis deposits. + /// + /// This aligns with the withdrawal commitments used in the "interop" + /// genesis states. 
+ pub fn deterministic_withdrawal_keypairs(self, num_keypairs: usize) -> Self { + self.withdrawal_keypairs( + types::test_utils::generate_deterministic_keypairs(num_keypairs) + .into_iter() + .map(Option::Some) + .collect(), + ) + } + pub fn withdrawal_keypairs(mut self, withdrawal_keypairs: Vec>) -> Self { self.withdrawal_keypairs = withdrawal_keypairs; self diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d01f2505c..9a49843a9 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,6 +6,10 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" +logging = { path = "../../common/logging" } +state_processing = { path = "../../consensus/state_processing" } +operation_pool = { path = "../operation_pool" } +tokio = "1.14.0" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/address_change_broadcast.rs b/beacon_node/client/src/address_change_broadcast.rs new file mode 100644 index 000000000..272ee908f --- /dev/null +++ b/beacon_node/client/src/address_change_broadcast.rs @@ -0,0 +1,322 @@ +use crate::*; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{debug, info, warn, Logger}; +use slot_clock::SlotClock; +use std::cmp; +use std::collections::HashSet; +use std::mem; +use std::time::Duration; +use tokio::sync::mpsc::UnboundedSender; +use tokio::time::sleep; +use types::EthSpec; + +/// The size of each chunk of addresses changes to be broadcast at the Capella +/// fork. +const BROADCAST_CHUNK_SIZE: usize = 128; +/// The delay between broadcasting each chunk. +const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500); + +/// If the Capella fork has already been reached, `broadcast_address_changes` is +/// called immediately. +/// +/// If the Capella fork has not been reached, waits until the start of the fork +/// epoch and then calls `broadcast_address_changes`. +pub async fn broadcast_address_changes_at_capella( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let spec = &chain.spec; + let slot_clock = &chain.slot_clock; + + let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch { + epoch.start_slot(T::EthSpec::slots_per_epoch()) + } else { + // Exit now if Capella is not defined. + return; + }; + + // Wait until the Capella fork epoch. + while chain.slot().map_or(true, |slot| slot < capella_fork_slot) { + match slot_clock.duration_to_slot(capella_fork_slot) { + Some(duration) => { + // Sleep until the Capella fork. + sleep(duration).await; + break; + } + None => { + // We were unable to read the slot clock wait another slot + // and then try again. + sleep(slot_clock.slot_duration()).await; + } + } + } + + // The following function will be called in two scenarios: + // + // 1. The node has been running for some time and the Capella fork has just + // been reached. + // 2. The node has just started and it is *after* the Capella fork. + broadcast_address_changes(chain, network_send, log).await +} + +/// Broadcasts any address changes that are flagged for broadcasting at the +/// Capella fork epoch. +/// +/// Address changes are published in chunks, with a delay between each chunk. +/// This helps reduce the load on the P2P network and also helps prevent us from +/// clogging our `network_send` channel and being late to publish +/// blocks, attestations, etc. 
+pub async fn broadcast_address_changes( + chain: &BeaconChain, + network_send: UnboundedSender>, + log: &Logger, +) { + let head = chain.head_snapshot(); + let mut changes = chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec); + + while !changes.is_empty() { + // This `split_off` approach is to allow us to have owned chunks of the + // `changes` vec. The `std::slice::Chunks` method uses references and + // the `itertools` iterator that achives this isn't `Send` so it doesn't + // work well with the `sleep` at the end of the loop. + let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len())); + let chunk = mem::replace(&mut changes, tail); + + let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE); + let mut num_ok = 0; + let mut num_err = 0; + + // Publish each individual address change. + for address_change in chunk { + let validator_index = address_change.message.validator_index; + + let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change)); + let message = NetworkMessage::Publish { + messages: vec![pubsub_message], + }; + // It seems highly unlikely that this unbounded send will fail, but + // we handle the result nontheless. + if let Err(e) = network_send.send(message) { + debug!( + log, + "Failed to publish change message"; + "error" => ?e, + "validator_index" => validator_index + ); + num_err += 1; + } else { + debug!( + log, + "Published address change message"; + "validator_index" => validator_index + ); + num_ok += 1; + published_indices.insert(validator_index); + } + } + + // Remove any published indices from the list of indices that need to be + // published. + chain + .op_pool + .register_indices_broadcasted_at_capella(&published_indices); + + info!( + log, + "Published address change messages"; + "num_published" => num_ok, + ); + + if num_err > 0 { + warn!( + log, + "Failed to publish address changes"; + "info" => "failed messages will be retried", + "num_unable_to_publish" => num_err, + ); + } + + sleep(BROADCAST_CHUNK_DELAY).await; + } + + debug!( + log, + "Address change routine complete"; + ); +} + +#[cfg(not(debug_assertions))] // Tests run too slow in debug. +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use operation_pool::ReceivedPreCapella; + use state_processing::{SigVerifiedOp, VerifyOperation}; + use std::collections::HashSet; + use tokio::sync::mpsc; + use types::*; + + type E = MainnetEthSpec; + + pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3; + pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42); + + struct Tester { + harness: BeaconChainHarness>, + /// Changes which should be broadcast at the Capella fork. + received_pre_capella_changes: Vec>, + /// Changes which should *not* be broadcast at the Capella fork. 
+ not_received_pre_capella_changes: Vec>, + } + + impl Tester { + fn new() -> Self { + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); + let capella_fork_epoch = Epoch::new(2); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + spec.capella_fork_epoch = Some(capella_fork_epoch); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .logger(logging::test_logger()) + .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { + harness, + received_pre_capella_changes: <_>::default(), + not_received_pre_capella_changes: <_>::default(), + } + } + + fn produce_verified_address_change( + &self, + validator_index: u64, + ) -> SigVerifiedOp { + let change = self + .harness + .make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS); + let head = self.harness.chain.head_snapshot(); + + change + .validate(&head.beacon_state, &self.harness.spec) + .unwrap() + } + + fn produce_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + fn produce_not_received_pre_capella_changes(mut self, indices: Vec) -> Self { + for validator_index in indices { + self.not_received_pre_capella_changes + .push(self.produce_verified_address_change(validator_index)); + } + self + } + + async fn run(self) { + let harness = self.harness; + let chain = harness.chain.clone(); + + let mut broadcast_indices = HashSet::new(); + for change in self.received_pre_capella_changes { + broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::Yes); + } + + let mut non_broadcast_indices = HashSet::new(); + for change in self.not_received_pre_capella_changes { + non_broadcast_indices.insert(change.as_inner().message.validator_index); + chain + .op_pool + .insert_bls_to_execution_change(change, ReceivedPreCapella::No); + } + + harness.set_current_slot( + chain + .spec + .capella_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()), + ); + + let (sender, mut receiver) = mpsc::unbounded_channel(); + + broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await; + + let mut broadcasted_changes = vec![]; + while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await { + match messages.pop().unwrap() { + PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change), + _ => panic!("unexpected message"), + } + } + + assert_eq!( + broadcasted_changes.len(), + broadcast_indices.len(), + "all expected changes should have been broadcast" + ); + + for broadcasted in &broadcasted_changes { + assert!( + !non_broadcast_indices.contains(&broadcasted.message.validator_index), + "messages not flagged for broadcast should not have been broadcast" + ); + } + + let head = chain.head_snapshot(); + assert!( + chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &head.beacon_state, + &chain.spec, + ) + .is_empty(), + "there shouldn't be any capella broadcast changes left in the op pool" + ); + } + } + + // Useful for generating even-numbered indices. Required since only even + // numbered genesis validators have BLS credentials. 
+ fn even_indices(start: u64, count: usize) -> Vec { + (start..).filter(|i| i % 2 == 0).take(count).collect() + } + + #[tokio::test] + async fn one_chunk() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, 4)) + .produce_not_received_pre_capella_changes(even_indices(10, 4)) + .run() + .await; + } + + #[tokio::test] + async fn multiple_chunks() { + Tester::new() + .produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2)) + .run() + .await; + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3b016ebda..5fa2fddc3 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,3 +1,4 @@ +use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -802,6 +803,25 @@ where // Spawns a routine that polls the `exchange_transition_configuration` endpoint. execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone()); } + + // Spawn a service to publish BLS to execution changes at the Capella fork. + if let Some(network_senders) = self.network_senders { + let inner_chain = beacon_chain.clone(); + let broadcast_context = + runtime_context.service_context("addr_bcast".to_string()); + let log = broadcast_context.log().clone(); + broadcast_context.executor.spawn( + async move { + broadcast_address_changes_at_capella( + &inner_chain, + network_senders.network_send(), + &log, + ) + .await + }, + "addr_broadcast", + ); + } } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 24df87408..b0184dc0f 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,5 +1,6 @@ extern crate slog; +mod address_change_broadcast; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0dc918f42..5110a73ed 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -36,6 +36,7 @@ tree_hash = "0.4.1" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } +operation_pool = { path = "../operation_pool" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index e0f8bcf2a..0edbaf8f7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -35,6 +35,7 @@ use eth2::types::{ use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; @@ -1696,8 +1697,12 @@ pub fn serve( .to_execution_address; // New to P2P *and* op pool, gossip immediately if post-Capella. 
- let publish = chain.current_slot_is_post_capella().unwrap_or(false); - if publish { + let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { publish_pubsub_message( &network_tx, PubsubMessage::BlsToExecutionChange(Box::new( @@ -1708,14 +1713,14 @@ pub fn serve( // Import to op pool (may return `false` if there's a race). let imported = - chain.import_bls_to_execution_change(verified_address_change); + chain.import_bls_to_execution_change(verified_address_change, received_pre_capella); info!( log, "Processed BLS to execution change"; "validator_index" => validator_index, "address" => ?address, - "published" => publish, + "published" => matches!(received_pre_capella, ReceivedPreCapella::No), "imported" => imported, ); } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index e61470fe9..614412356 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -6,6 +6,7 @@ use beacon_chain::{ }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, @@ -438,6 +439,8 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .await .unwrap(); + let expected_received_pre_capella_messages = valid_address_changes[..num_pre_capella].to_vec(); + // Conflicting changes for the same validators should all fail. let error = client .post_beacon_pool_bls_to_execution_changes(&conflicting_address_changes[..num_pre_capella]) @@ -464,6 +467,20 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { harness.extend_to_slot(capella_slot - 1).await; assert_eq!(harness.head_slot(), capella_slot - 1); + assert_eq!( + harness + .chain + .op_pool + .get_bls_to_execution_changes_received_pre_capella( + &harness.chain.head_snapshot().beacon_state, + &spec, + ) + .into_iter() + .collect::>(), + HashSet::from_iter(expected_received_pre_capella_messages.into_iter()), + "all pre-capella messages should be queued for capella broadcast" + ); + // Add Capella blocks which should be full of BLS to execution changes. 
    for i in 0..validator_count / max_bls_to_execution_changes {
        let head_block_root = harness.extend_slots(1).await;
diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml
index b1d928eec..5ce331169 100644
--- a/beacon_node/network/Cargo.toml
+++ b/beacon_node/network/Cargo.toml
@@ -45,6 +45,7 @@ tokio-util = { version = "0.6.3", features = ["time"] }
 derivative = "2.2.0"
 delay_map = "0.1.1"
 ethereum-types = { version = "0.14.1", optional = true }
+operation_pool = { path = "../operation_pool" }
 
 [features]
 deterministic_long_lived_attnets = [ "ethereum-types" ]
diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
index 44d611189..f2b1b3a26 100644
--- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
@@ -12,6 +12,7 @@ use beacon_chain::{
     GossipVerifiedBlock, NotifyExecutionLayer,
 };
 use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
+use operation_pool::ReceivedPreCapella;
 use slog::{crit, debug, error, info, trace, warn};
 use slot_clock::SlotClock;
 use ssz::Encode;
@@ -1251,7 +1252,12 @@ impl<T: BeaconChainTypes> Worker<T> {
 
         self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
 
-        self.chain.import_bls_to_execution_change(change);
+        // Address change messages from gossip are only processed *after* the
+        // Capella fork epoch.
+        let received_pre_capella = ReceivedPreCapella::No;
+
+        self.chain
+            .import_bls_to_execution_change(change, received_pre_capella);
 
         debug!(
             self.log,
diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml
index 848323358..cc4eacde8 100644
--- a/beacon_node/operation_pool/Cargo.toml
+++ b/beacon_node/operation_pool/Cargo.toml
@@ -19,6 +19,7 @@ serde = "1.0.116"
 serde_derive = "1.0.116"
 store = { path = "../store" }
 bitvec = "1"
+rand = "0.8.5"
 
 [dev-dependencies]
 beacon_chain = { path = "../beacon_chain" }
diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs
index 84513d466..c73666e14 100644
--- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs
+++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs
@@ -1,11 +1,20 @@
 use state_processing::SigVerifiedOp;
-use std::collections::{hash_map::Entry, HashMap};
+use std::collections::{hash_map::Entry, HashMap, HashSet};
 use std::sync::Arc;
 use types::{
     AbstractExecPayload, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock,
     SignedBlsToExecutionChange,
 };
 
+/// Indicates if a `BlsToExecutionChange` was received before or after the
+/// Capella fork. This is used to know which messages we should broadcast at the
+/// Capella fork epoch.
+#[derive(Copy, Clone)]
+pub enum ReceivedPreCapella {
+    Yes,
+    No,
+}
+
 /// Pool of BLS to execution changes that maintains a LIFO queue and an index by validator.
 ///
 /// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork,
@@ -16,6 +25,9 @@ pub struct BlsToExecutionChanges<T: EthSpec> {
     by_validator_index: HashMap<u64, Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
     /// Last-in-first-out (LIFO) queue of verified messages.
     queue: Vec<Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
+    /// Contains a set of validator indices which need to have their changes
+    /// broadcast at the Capella epoch.
+    received_pre_capella_indices: HashSet<u64>,
 }
 
 impl<T: EthSpec> BlsToExecutionChanges<T> {
@@ -31,16 +43,18 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
     pub fn insert(
         &mut self,
         verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
+        received_pre_capella: ReceivedPreCapella,
     ) -> bool {
+        let validator_index = verified_change.as_inner().message.validator_index;
         // Wrap in an `Arc` once on insert.
         let verified_change = Arc::new(verified_change);
-        match self
-            .by_validator_index
-            .entry(verified_change.as_inner().message.validator_index)
-        {
+        match self.by_validator_index.entry(validator_index) {
             Entry::Vacant(entry) => {
                 self.queue.push(verified_change.clone());
                 entry.insert(verified_change);
+                if matches!(received_pre_capella, ReceivedPreCapella::Yes) {
+                    self.received_pre_capella_indices.insert(validator_index);
+                }
                 true
             }
             Entry::Occupied(_) => false,
@@ -61,6 +75,24 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
         self.queue.iter().rev()
     }
 
+    /// Returns only the changes that are flagged for broadcasting at the Capella
+    /// fork. Uses FIFO ordering, although we expect this list to be shuffled by
+    /// the caller.
+    pub fn iter_received_pre_capella(
+        &self,
+    ) -> impl Iterator<Item = &Arc<SigVerifiedOp<SignedBlsToExecutionChange, T>>> {
+        self.queue.iter().filter(|address_change| {
+            self.received_pre_capella_indices
+                .contains(&address_change.as_inner().message.validator_index)
+        })
+    }
+
+    /// Returns the set of indices which should have their address changes
+    /// broadcast at the Capella fork.
+    pub fn iter_pre_capella_indices(&self) -> impl Iterator<Item = &u64> {
+        self.received_pre_capella_indices.iter()
+    }
+
     /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
     ///
     /// The block check is necessary to avoid pruning too eagerly and losing the ability to include
@@ -102,4 +134,14 @@ impl<T: EthSpec> BlsToExecutionChanges<T> {
             self.by_validator_index.remove(&validator_index);
         }
     }
+
+    /// Removes `broadcasted` validators from the set of validators that should
+    /// have their BLS changes broadcast at the Capella fork boundary.
+ pub fn register_indices_broadcasted_at_capella(&mut self, broadcasted: &HashSet) { + self.received_pre_capella_indices = self + .received_pre_capella_indices + .difference(broadcasted) + .copied() + .collect(); + } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 4643addad..d401deb89 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -9,12 +9,13 @@ mod persistence; mod reward_cache; mod sync_aggregate_id; +pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::AttMaxCover; pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, - PersistedOperationPoolV5, + PersistedOperationPoolV15, PersistedOperationPoolV5, }; pub use reward_cache::RewardCache; @@ -24,6 +25,8 @@ use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; +use rand::seq::SliceRandom; +use rand::thread_rng; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -533,10 +536,11 @@ impl OperationPool { pub fn insert_bls_to_execution_change( &self, verified_change: SigVerifiedOp, + received_pre_capella: ReceivedPreCapella, ) -> bool { self.bls_to_execution_changes .write() - .insert(verified_change) + .insert(verified_change, received_pre_capella) } /// Get a list of execution changes for inclusion in a block. @@ -562,6 +566,42 @@ impl OperationPool { ) } + /// Get a list of execution changes to be broadcast at the Capella fork. + /// + /// The list that is returned will be shuffled to help provide a fair + /// broadcast of messages. + pub fn get_bls_to_execution_changes_received_pre_capella( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec { + let mut changes = filter_limit_operations( + self.bls_to_execution_changes + .read() + .iter_received_pre_capella(), + |address_change| { + address_change.signature_is_still_valid(&state.fork()) + && state + .get_validator(address_change.as_inner().message.validator_index as usize) + .map_or(false, |validator| { + !validator.has_eth1_withdrawal_credential(spec) + }) + }, + |address_change| address_change.as_inner().clone(), + usize::max_value(), + ); + changes.shuffle(&mut thread_rng()); + changes + } + + /// Removes `broadcasted` validators from the set of validators that should + /// have their BLS changes broadcast at the Capella fork boundary. + pub fn register_indices_broadcasted_at_capella(&self, broadcasted: &HashSet) { + self.bls_to_execution_changes + .write() + .register_indices_broadcasted_at_capella(broadcasted); + } + /// Prune BLS to execution changes that have been applied to the state more than 1 block ago. 
pub fn prune_bls_to_execution_changes>( &self, diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 4948040ae..65354e01a 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,6 +1,6 @@ use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; -use crate::bls_to_execution_changes::BlsToExecutionChanges; +use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; @@ -9,6 +9,8 @@ use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::SigVerifiedOp; +use std::collections::HashSet; +use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ -19,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { #[superstruct(only(V5))] pub attestations_v5: Vec<(AttestationId, Vec>)>, /// Attestations and their attesting indices. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. pub sync_contributions: PersistedSyncContributions, @@ -41,23 +43,27 @@ pub struct PersistedOperationPool { #[superstruct(only(V5))] pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub attester_slashings: Vec, T>>, /// [DEPRECATED] Proposer slashings. #[superstruct(only(V5))] pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, /// [DEPRECATED] Voluntary exits. #[superstruct(only(V5))] pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12, V14))] + #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, /// BLS to Execution Changes - #[superstruct(only(V14))] + #[superstruct(only(V14, V15))] pub bls_to_execution_changes: Vec>, + /// Validator indices with BLS to Execution Changes to be broadcast at the + /// Capella fork. + #[superstruct(only(V15))] + pub capella_bls_change_broadcast_indices: Vec, } impl PersistedOperationPool { @@ -110,18 +116,26 @@ impl PersistedOperationPool { .map(|bls_to_execution_change| (**bls_to_execution_change).clone()) .collect(); - PersistedOperationPool::V14(PersistedOperationPoolV14 { + let capella_bls_change_broadcast_indices = operation_pool + .bls_to_execution_changes + .read() + .iter_pre_capella_indices() + .copied() + .collect(); + + PersistedOperationPool::V15(PersistedOperationPoolV15 { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, bls_to_execution_changes, + capella_bls_change_broadcast_indices, }) } /// Reconstruct an `OperationPool`. - pub fn into_operation_pool(self) -> Result, OpPoolError> { + pub fn into_operation_pool(mut self) -> Result, OpPoolError> { let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( self.proposer_slashings()? 
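The new `PersistedOperationPoolV15` variant above differs from V14 only by the `capella_bls_change_broadcast_indices` field, so the on-disk upgrade can carry every existing field across and start from an empty broadcast list (a V14 database has no record of which changes arrived pre-Capella). The schema-migration code itself is not part of these hunks; the helper below is only a rough sketch of that step, with an assumed name:

```
use operation_pool::{PersistedOperationPoolV14, PersistedOperationPoolV15};
use types::EthSpec;

// Rough sketch (assumed helper name): upgrade a persisted V14 op pool to V15 by
// carrying the existing fields across and starting with no pre-Capella indices.
fn upgrade_op_pool_v14_to_v15<T: EthSpec>(
    v14: PersistedOperationPoolV14<T>,
) -> PersistedOperationPoolV15<T> {
    PersistedOperationPoolV15 {
        attestations: v14.attestations,
        sync_contributions: v14.sync_contributions,
        attester_slashings: v14.attester_slashings,
        proposer_slashings: v14.proposer_slashings,
        voluntary_exits: v14.voluntary_exits,
        bls_to_execution_changes: v14.bls_to_execution_changes,
        capella_bls_change_broadcast_indices: vec![],
    }
}
```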
@@ -142,33 +156,43 @@ impl PersistedOperationPool { PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { return Err(OpPoolError::IncorrectOpPoolVariant) } - PersistedOperationPool::V14(ref pool) => { + PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in pool.attestations.clone() { + for (att, attesting_indices) in self.attestations()?.clone() { map.insert(att, attesting_indices); } RwLock::new(map) } }; - let bls_to_execution_changes = match self { - PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { - return Err(OpPoolError::IncorrectOpPoolVariant) + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { + let persisted_changes = mem::take(persisted_changes); + + let broadcast_indices = + if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { + mem::take(indices).into_iter().collect() + } else { + HashSet::new() + }; + + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); } - PersistedOperationPool::V14(pool) => { - let mut bls_to_execution_changes = BlsToExecutionChanges::default(); - for bls_to_execution_change in pool.bls_to_execution_changes { - bls_to_execution_changes.insert(bls_to_execution_change); - } - RwLock::new(bls_to_execution_changes) - } - }; + } let op_pool = OperationPool { attestations, sync_contributions, attester_slashings, proposer_slashings, voluntary_exits, - bls_to_execution_changes, + bls_to_execution_changes: RwLock::new(bls_to_execution_changes), reward_cache: Default::default(), _phantom: Default::default(), }; @@ -204,6 +228,20 @@ impl StoreItem for PersistedOperationPoolV12 { } } +impl StoreItem for PersistedOperationPoolV14 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) + } +} + /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { @@ -216,8 +254,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. - PersistedOperationPoolV14::from_ssz_bytes(bytes) - .map(Self::V14) + PersistedOperationPoolV15::from_ssz_bytes(bytes) + .map(Self::V15) .map_err(Into::into) } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index fb5769635..729b36ff2 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(15); // All the keys that get stored under the `BeaconMeta` column. 
// diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index cb73e43f9..b279515bd 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -10,6 +10,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, + Eq, Hash, Clone, Serialize, diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/signed_bls_to_execution_change.rs index 92b79fad3..2b17095ae 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/signed_bls_to_execution_change.rs @@ -10,6 +10,7 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, + Eq, Hash, Clone, Serialize,
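Taken together, these hunks tag incoming messages with `ReceivedPreCapella`, persist the tagged queue across restarts, and expose it through the operation pool; the service that actually drains the queue when the Capella fork arrives is outside this diff. A minimal sketch of such a caller, assuming the usual `BeaconChain` handle and leaving the pubsub publish step abstract:

```
use std::collections::HashSet;

use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::SignedBlsToExecutionChange;

// Minimal sketch (assumed function name): drain the pre-Capella queue at the
// fork and record what was sent. `publish` stands in for the pubsub path used
// by the HTTP API handler above (`PubsubMessage::BlsToExecutionChange`).
fn broadcast_pre_capella_changes<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    mut publish: impl FnMut(SignedBlsToExecutionChange),
) {
    let head = chain.head_snapshot();
    let changes = chain
        .op_pool
        .get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec);

    let mut published = HashSet::new();
    for change in changes {
        published.insert(change.message.validator_index);
        publish(change);
    }

    // Registering the indices prunes them from the persisted broadcast set, so a
    // node that restarts after the fork does not re-send the same messages.
    chain
        .op_pool
        .register_indices_broadcasted_at_capella(&published);
}
```

Registering the published indices afterwards is what keeps the shuffled broadcast a one-shot operation rather than something repeated on every restart.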