Rename random to prev_randao (#3040)

## Issue Addressed

As discussed on last night's consensus call, next week's testnets will target the [Kiln Spec v2](https://hackmd.io/@n0ble/kiln-spec).

Presently, we support Kiln V1. V2 is backwards compatible, except that it renames `random` to `prev_randao` (see the sketch below these links) in:

- https://github.com/ethereum/execution-apis/pull/180
- https://github.com/ethereum/consensus-specs/pull/2835
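
To illustrate the shape of the change only, here is a minimal, self-contained Rust sketch of a payload-attributes-style struct after the rename. The type name and serde setup are assumptions for illustration, not the exact Lighthouse definitions; the real types are updated in the diff below.

```rust
use serde::{Deserialize, Serialize};

/// Illustrative stand-in for the payload attributes after the rename; the real
/// Lighthouse types live in the `execution_layer` crate and carry more detail.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct PayloadAttributesSketch {
    timestamp: u64,
    /// Previously named `random`; appears as `prevRandao` on the JSON engine API.
    prev_randao: [u8; 32],
    suggested_fee_recipient: [u8; 20],
}

fn main() {
    let attrs = PayloadAttributesSketch {
        timestamp: 5,
        prev_randao: [0u8; 32],
        suggested_fee_recipient: [0u8; 20],
    };
    // The serialised JSON now contains "prevRandao" rather than "random".
    println!("{}", serde_json::to_string_pretty(&attrs).unwrap());
}
```

The real client uses `Hash256`/`Address` types with hex encodings rather than raw byte arrays; the sketch only shows where the renamed field sits.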

With this PR we'll no longer be compatible with the existing Kintsugi and Kiln testnets; however, we'll be ready for next week's testnets. I raised this breaking change on the call last night and we are all keen to move forward and break things.

We now target the [`merge-kiln-v2`](https://github.com/MariusVanDerWijden/go-ethereum/tree/merge-kiln-v2) branch for interop with Geth. This required adding the `--http.authport` flag to the Geth invocation in the tester to avoid a port conflict at startup (sketched below).
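
For context, a minimal sketch of the idea, assuming a `geth` binary built from `merge-kiln-v2` is on the `PATH`; the flag names mirror the diff below, but the port helper and datadir handling are simplified stand-ins for the tester's actual code.

```rust
use std::net::TcpListener;
use std::process::{Child, Command};

/// Ask the OS for an unused TCP port by binding to port 0 and reading the assignment.
/// (The tester has its own `unused_port` helper; this is a simplified stand-in.)
fn unused_tcp_port() -> u16 {
    TcpListener::bind("127.0.0.1:0")
        .expect("bind ephemeral port")
        .local_addr()
        .expect("read local addr")
        .port()
}

/// Start Geth with distinct HTTP and authenticated-RPC ports so the two listeners
/// (and multiple engine instances on one host) never collide at startup.
fn start_geth(datadir: &str, http_port: u16, http_auth_port: u16) -> Child {
    Command::new("geth")
        .arg("--datadir").arg(datadir)
        .arg("--http")
        .arg("--http.api").arg("engine,eth")
        .arg("--http.port").arg(http_port.to_string())
        .arg("--http.authport").arg(http_auth_port.to_string())
        .arg("--port").arg(unused_tcp_port().to_string())
        .spawn()
        .expect("failed to start geth")
}

fn main() {
    let mut child = start_geth("/tmp/geth-test-datadir", unused_tcp_port(), unused_tcp_port());
    // A real test rig would now drive the engine API; here we just clean up.
    child.kill().ok();
}
```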

### Changes to exec integration tests

There's a change in the `merge-kiln-v2` version of Geth that means it can't compile on a vanilla GitHub runner. Bumping the `go` version on the runner to 1.17 solved this issue.

Whilst addressing this, I refactored the `testing/execution_engine_integration` crate to be a *binary* rather than a *library* with tests. This means we don't need to run the `build.rs` and build Geth whenever someone runs `make lint` or `make test-release`. That's nice for everyday users, and it's also nice for CI: we can have a specific runner for these tests and don't need to ensure *all* runners support everything required to build all execution clients.

## More Info

- [x] ~~EF tests are failing since the rename has broken some tests that reference the old field name. I have been told there will be new tests released in the coming days (25/02/22 or 26/02/22).~~
Commit `aea43b626b` (parent `4bf1af4e85`) by Paul Hauner, 2022-03-03 02:10:57 +00:00.
31 changed files with 304 additions and 221 deletions.


@ -190,6 +190,9 @@ jobs:
needs: cargo-fmt
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- name: Get latest version of stable Rust
run: rustup update stable
- name: Run exec engine integration tests in release


@ -53,7 +53,7 @@ use eth2::types::{
EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty,
};
use execution_layer::{ExecutionLayer, PayloadStatus};
use fork_choice::{AttestationFromBlock, ForkChoice};
use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation};
use futures::channel::mpsc::Sender;
use itertools::process_results;
use itertools::Itertools;
@ -3180,49 +3180,29 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// This method must be called whenever an execution engine indicates that a payload is
/// invalid.
///
/// If the `latest_root` is known to fork-choice it will be invalidated. If it is not known, an
/// error will be returned.
/// Fork choice will be run after the invalidation. The client may be shut down if the `op`
/// results in the justified checkpoint being invalidated.
///
/// If `latest_valid_hash` is `None` or references a block unknown to fork choice, no other
/// blocks will be invalidated. If `latest_valid_hash` is a block known to fork choice, all
/// blocks between the `latest_root` and the `latest_valid_hash` will be invalidated (which may
/// cause further, second-order invalidations).
///
/// ## Notes
///
/// Use these rules to set `latest_root`:
///
/// - When `forkchoiceUpdated` indicates an invalid block, set `latest_root` to be the
/// block root that was the head of the chain when `forkchoiceUpdated` was called.
/// - When `executePayload` returns an invalid block *during* block import, set
/// `latest_root` to be the parent of the beacon block containing the invalid
/// payload (because the block containing the payload is not present in fork choice).
/// - When `executePayload` returns an invalid block *after* block import, set
/// `latest_root` to be root of the beacon block containing the invalid payload.
/// See the documentation of `InvalidationOperation` for information about defining `op`.
pub fn process_invalid_execution_payload(
&self,
latest_root: Hash256,
latest_valid_hash: Option<ExecutionBlockHash>,
op: &InvalidationOperation,
) -> Result<(), Error> {
debug!(
self.log,
"Invalid execution payload in block";
"latest_valid_hash" => ?latest_valid_hash,
"latest_root" => ?latest_root,
"latest_valid_ancestor" => ?op.latest_valid_ancestor(),
"block_root" => ?op.block_root(),
);
// Update fork choice.
if let Err(e) = self
.fork_choice
.write()
.on_invalid_execution_payload(latest_root, latest_valid_hash)
{
if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) {
crit!(
self.log,
"Failed to process invalid payload";
"error" => ?e,
"latest_valid_hash" => ?latest_valid_hash,
"latest_root" => ?latest_root,
"latest_valid_ancestor" => ?op.latest_valid_ancestor(),
"block_root" => ?op.block_root(),
);
}
@ -3763,8 +3743,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// The execution engine has stated that all blocks between the
// `head_execution_block_hash` and `latest_valid_hash` are invalid.
self.process_invalid_execution_payload(
&InvalidationOperation::InvalidateMany {
head_block_root,
Some(*latest_valid_hash),
always_invalidate_head: true,
latest_valid_ancestor: *latest_valid_hash,
},
)?;
Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
@ -3781,7 +3764,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
//
// Using a `None` latest valid ancestor will result in only the head block
// being invalidated (no ancestors).
self.process_invalid_execution_payload(head_block_root, None)?;
self.process_invalid_execution_payload(
&InvalidationOperation::InvalidateOne {
block_root: head_block_root,
},
)?;
Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
}


@ -12,7 +12,7 @@ use crate::{
ExecutionPayloadError,
};
use execution_layer::PayloadStatus;
use fork_choice::PayloadVerificationStatus;
use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
use proto_array::{Block as ProtoBlock, ExecutionStatus};
use slog::debug;
use slot_clock::SlotClock;
@ -68,7 +68,13 @@ pub fn notify_new_payload<T: BeaconChainTypes>(
// This block has not yet been applied to fork choice, so the latest block that was
// imported to fork choice was the parent.
let latest_root = block.parent_root();
chain.process_invalid_execution_payload(latest_root, Some(latest_valid_hash))?;
chain.process_invalid_execution_payload(
&InvalidationOperation::InvalidateMany {
head_block_root: latest_root,
always_invalidate_head: false,
latest_valid_ancestor: latest_valid_hash,
},
)?;
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
}
@ -145,11 +151,19 @@ pub fn validate_merge_block<T: BeaconChainTypes>(
.slot_clock
.now()
.ok_or(BeaconChainError::UnableToReadSlot)?;
// Check the optimistic sync conditions. Note that because this is the merge block,
// the justified checkpoint can't have execution enabled so we only need to check the
// current slot is at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the block
// https://github.com/ethereum/consensus-specs/blob/v1.1.9/sync/optimistic.md#when-to-optimistically-import-blocks
if block.slot() + chain.spec.safe_slots_to_import_optimistically <= current_slot {
// Ensure the block is a candidate for optimistic import.
if chain
.fork_choice
.read()
.is_optimistic_candidate_block(
current_slot,
block.slot(),
&block.parent_root(),
&chain.spec,
)
.map_err(BeaconChainError::from)?
{
debug!(
chain.log,
"Optimistically accepting terminal block";


@ -231,8 +231,10 @@ fn valid_invalid_syncing() {
/// `latest_valid_hash`.
#[test]
fn invalid_payload_invalidates_parent() {
let mut rig = InvalidPayloadRig::new();
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
rig.import_block(Payload::Valid); // Import a valid transition block.
rig.move_to_first_justification(Payload::Syncing);
let roots = vec![
rig.import_block(Payload::Syncing),
@ -258,6 +260,7 @@ fn invalid_payload_invalidates_parent() {
fn justified_checkpoint_becomes_invalid() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
rig.import_block(Payload::Valid); // Import a valid transition block.
rig.move_to_first_justification(Payload::Syncing);
let justified_checkpoint = rig.head_info().current_justified_checkpoint;
@ -305,7 +308,9 @@ fn pre_finalized_latest_valid_hash() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
let mut blocks = vec![];
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing));
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@ -330,8 +335,12 @@ fn pre_finalized_latest_valid_hash() {
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
let slot = Slot::new(i);
let root = rig.block_root_at_slot(slot).unwrap();
if slot == 1 {
assert!(rig.execution_status(root).is_valid());
} else {
assert!(rig.execution_status(root).is_not_verified());
}
}
}
/// Ensure that a `latest_valid_hash` will:
@ -344,7 +353,10 @@ fn latest_valid_hash_will_validate() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
let blocks = rig.build_blocks(4, Payload::Syncing);
let mut blocks = vec![];
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
blocks.extend(rig.build_blocks(4, Payload::Syncing));
let latest_valid_root = rig
.block_root_at_slot(Slot::new(LATEST_VALID_SLOT))
@ -357,7 +369,7 @@ fn latest_valid_hash_will_validate() {
assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT);
for slot in 0..=4 {
for slot in 0..=5 {
let slot = Slot::new(slot);
let root = if slot > 0 {
// If not the genesis slot, check the blocks we just produced.
@ -386,7 +398,9 @@ fn latest_valid_hash_is_junk() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
let mut blocks = vec![];
blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing));
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@ -408,8 +422,12 @@ fn latest_valid_hash_is_junk() {
for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
let slot = Slot::new(i);
let root = rig.block_root_at_slot(slot).unwrap();
if slot == 1 {
assert!(rig.execution_status(root).is_valid());
} else {
assert!(rig.execution_status(root).is_not_verified());
}
}
}
/// Check that descendants of invalid blocks are also invalidated.
@ -421,6 +439,7 @@ fn invalidates_all_descendants() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
rig.import_block(Payload::Valid); // Import a valid transition block.
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@ -493,6 +512,7 @@ fn switches_heads() {
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
rig.import_block(Payload::Valid); // Import a valid transition block.
let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@ -571,8 +591,9 @@ fn invalid_during_processing() {
#[test]
fn invalid_after_optimistic_sync() {
let mut rig = InvalidPayloadRig::new();
let mut rig = InvalidPayloadRig::new().enable_attestations();
rig.move_to_terminal_block();
rig.import_block(Payload::Valid); // Import a valid transition block.
let mut roots = vec![
rig.import_block(Payload::Syncing),


@ -110,7 +110,7 @@ pub struct ExecutionBlock {
#[derive(Clone, Copy, Debug)]
pub struct PayloadAttributes {
pub timestamp: u64,
pub random: Hash256,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
}


@ -288,7 +288,7 @@ mod test {
"stateRoot": HASH_01,
"receiptsRoot": HASH_00,
"logsBloom": LOGS_BLOOM_01,
"random": HASH_01,
"prevRandao": HASH_01,
"blockNumber": "0x0",
"gasLimit": "0x1",
"gasUsed": "0x2",
@ -441,7 +441,7 @@ mod test {
},
Some(PayloadAttributes {
timestamp: 5,
random: Hash256::zero(),
prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::repeat_byte(0),
}),
)
@ -458,7 +458,7 @@ mod test {
},
{
"timestamp":"0x5",
"random": HASH_00,
"prevRandao": HASH_00,
"suggestedFeeRecipient": ADDRESS_00
}]
}),
@ -495,7 +495,7 @@ mod test {
state_root: Hash256::repeat_byte(1),
receipts_root: Hash256::repeat_byte(0),
logs_bloom: vec![1; 256].into(),
random: Hash256::repeat_byte(1),
prev_randao: Hash256::repeat_byte(1),
block_number: 0,
gas_limit: 1,
gas_used: 2,
@ -517,7 +517,7 @@ mod test {
"stateRoot": HASH_01,
"receiptsRoot": HASH_00,
"logsBloom": LOGS_BLOOM_01,
"random": HASH_01,
"prevRandao": HASH_01,
"blockNumber": "0x0",
"gasLimit": "0x1",
"gasUsed": "0x2",
@ -596,7 +596,7 @@ mod test {
},
Some(PayloadAttributes {
timestamp: 5,
random: Hash256::zero(),
prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
})
)
@ -613,7 +613,7 @@ mod test {
},
{
"timestamp":"0x5",
"random": HASH_00,
"prevRandao": HASH_00,
"suggestedFeeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"
}]
})
@ -643,7 +643,7 @@ mod test {
},
Some(PayloadAttributes {
timestamp: 5,
random: Hash256::zero(),
prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
})
)
@ -687,7 +687,7 @@ mod test {
"stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45",
"receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": LOGS_BLOOM_00,
"random": HASH_00,
"prevRandao": HASH_00,
"blockNumber":"0x1",
"gasLimit":"0x1c95111",
"gasUsed":"0x0",
@ -710,7 +710,7 @@ mod test {
state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(),
receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
logs_bloom: vec![0; 256].into(),
random: Hash256::zero(),
prev_randao: Hash256::zero(),
block_number: 1,
gas_limit: u64::from_str_radix("1c95111",16).unwrap(),
gas_used: 0,
@ -735,7 +735,7 @@ mod test {
state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(),
receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(),
logs_bloom: vec![0; 256].into(),
random: Hash256::zero(),
prev_randao: Hash256::zero(),
block_number: 1,
gas_limit: u64::from_str_radix("1c9c380",16).unwrap(),
gas_used: 0,
@ -757,7 +757,7 @@ mod test {
"stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45",
"receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsBloom": LOGS_BLOOM_00,
"random": HASH_00,
"prevRandao": HASH_00,
"blockNumber":"0x1",
"gasLimit":"0x1c9c380",
"gasUsed":"0x0",


@ -64,7 +64,7 @@ pub struct JsonExecutionPayloadV1<T: EthSpec> {
pub receipts_root: Hash256,
#[serde(with = "serde_logs_bloom")]
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
pub random: Hash256,
pub prev_randao: Hash256,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
@ -91,7 +91,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayloadV1<T> {
state_root,
receipts_root,
logs_bloom,
random,
prev_randao,
block_number,
gas_limit,
gas_used,
@ -108,7 +108,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayloadV1<T> {
state_root,
receipts_root,
logs_bloom,
random,
prev_randao,
block_number,
gas_limit,
gas_used,
@ -130,7 +130,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
state_root,
receipts_root,
logs_bloom,
random,
prev_randao,
block_number,
gas_limit,
gas_used,
@ -147,7 +147,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
state_root,
receipts_root,
logs_bloom,
random,
prev_randao,
block_number,
gas_limit,
gas_used,
@ -165,7 +165,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
pub struct JsonPayloadAttributesV1 {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
pub timestamp: u64,
pub random: Hash256,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
}
@ -174,13 +174,13 @@ impl From<PayloadAttributes> for JsonPayloadAttributesV1 {
// Use this verbose deconstruction pattern to ensure no field is left unused.
let PayloadAttributes {
timestamp,
random,
prev_randao,
suggested_fee_recipient,
} = p;
Self {
timestamp,
random,
prev_randao,
suggested_fee_recipient,
}
}
@ -191,13 +191,13 @@ impl From<JsonPayloadAttributesV1> for PayloadAttributes {
// Use this verbose deconstruction pattern to ensure no field is left unused.
let JsonPayloadAttributesV1 {
timestamp,
random,
prev_randao,
suggested_fee_recipient,
} = j;
Self {
timestamp,
random,
prev_randao,
suggested_fee_recipient,
}
}


@ -50,7 +50,7 @@ impl Logging {
struct PayloadIdCacheKey {
pub head_block_hash: ExecutionBlockHash,
pub timestamp: u64,
pub random: Hash256,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,
}
@ -77,7 +77,7 @@ impl<T> Engine<T> {
&self,
head_block_hash: ExecutionBlockHash,
timestamp: u64,
random: Hash256,
prev_randao: Hash256,
suggested_fee_recipient: Address,
) -> Option<PayloadId> {
self.payload_id_cache
@ -86,7 +86,7 @@ impl<T> Engine<T> {
.get(&PayloadIdCacheKey {
head_block_hash,
timestamp,
random,
prev_randao,
suggested_fee_recipient,
})
.cloned()
@ -393,7 +393,7 @@ impl PayloadIdCacheKey {
Self {
head_block_hash: state.head_block_hash,
timestamp: attributes.timestamp,
random: attributes.random,
prev_randao: attributes.prev_randao,
suggested_fee_recipient: attributes.suggested_fee_recipient,
}
}


@ -392,7 +392,7 @@ impl ExecutionLayer {
&self,
parent_hash: ExecutionBlockHash,
timestamp: u64,
random: Hash256,
prev_randao: Hash256,
finalized_block_hash: ExecutionBlockHash,
proposer_index: u64,
) -> Result<ExecutionPayload<T>, Error> {
@ -402,14 +402,14 @@ impl ExecutionLayer {
self.log(),
"Issuing engine_getPayload";
"suggested_fee_recipient" => ?suggested_fee_recipient,
"random" => ?random,
"prev_randao" => ?prev_randao,
"timestamp" => timestamp,
"parent_hash" => ?parent_hash,
);
self.engines()
.first_success(|engine| async move {
let payload_id = if let Some(id) = engine
.get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient)
.get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient)
.await
{
// The payload id has been cached for this engine.
@ -428,7 +428,7 @@ impl ExecutionLayer {
};
let payload_attributes = PayloadAttributes {
timestamp,
random,
prev_randao,
suggested_fee_recipient,
};


@ -326,7 +326,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
receipts_root: Hash256::repeat_byte(42),
state_root: Hash256::repeat_byte(43),
logs_bloom: vec![0; 256].into(),
random: attributes.random,
prev_randao: attributes.prev_randao,
block_number: parent.block_number() + 1,
gas_limit: GAS_LIMIT,
gas_used: GAS_USED,


@ -111,7 +111,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
let parent_hash = latest_execution_block.block_hash();
let block_number = latest_execution_block.block_number() + 1;
let timestamp = block_number;
let random = Hash256::from_low_u64_be(block_number);
let prev_randao = Hash256::from_low_u64_be(block_number);
let finalized_block_hash = parent_hash;
self.el
@ -120,7 +120,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
ExecutionBlockHash::zero(),
Some(PayloadAttributes {
timestamp,
random,
prev_randao,
suggested_fee_recipient: Address::repeat_byte(42),
}),
)
@ -133,7 +133,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
.get_payload::<T>(
parent_hash,
timestamp,
random,
prev_randao,
finalized_block_hash,
validator_index,
)
@ -143,7 +143,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
assert_eq!(payload.parent_hash, parent_hash);
assert_eq!(payload.block_number, block_number);
assert_eq!(payload.timestamp, timestamp);
assert_eq!(payload.random, random);
assert_eq!(payload.prev_randao, prev_randao);
let status = self.el.notify_new_payload(&payload).await.unwrap();
assert_eq!(status, PayloadStatus::Valid);


@ -13,7 +13,12 @@ status = [
"clippy",
"arbitrary-check",
"cargo-audit",
"cargo-udeps"
"cargo-udeps",
"beacon-chain-tests",
"op-pool-tests",
"doppelganger-protection-test",
"execution-engine-integration-ubuntu",
"cargo-vendor"
]
use_squash_merge = true
timeout_sec = 10800


@ -1,4 +1,4 @@
use crate::ForkChoiceStore;
use crate::{ForkChoiceStore, InvalidationOperation};
use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use std::cmp::Ordering;
@ -480,11 +480,10 @@ where
/// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation.
pub fn on_invalid_execution_payload(
&mut self,
head_block_root: Hash256,
latest_valid_ancestor_root: Option<ExecutionBlockHash>,
op: &InvalidationOperation,
) -> Result<(), Error<T::Error>> {
self.proto_array
.process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root)
.process_execution_payload_invalidation(op)
.map_err(Error::FailedToProcessInvalidExecutionPayload)
}
@ -956,19 +955,19 @@ where
return Ok(true);
}
// If the block has an ancestor with a verified parent, import this block.
// If the parent block has execution enabled, always import the block.
//
// TODO: This condition is not yet merged into the spec. See:
// TODO(bellatrix): this condition has not yet been merged into the spec.
//
// https://github.com/ethereum/consensus-specs/pull/2841
// See:
//
// ## Note
//
// If `block_parent_root` is unknown this iter will always return `None`.
// https://github.com/ethereum/consensus-specs/pull/2844
if self
.proto_array
.iter_nodes(block_parent_root)
.any(|node| node.execution_status.is_valid())
.get_block(block_parent_root)
.map_or(false, |parent| {
parent.execution_status.is_execution_enabled()
})
{
return Ok(true);
}


@ -6,4 +6,4 @@ pub use crate::fork_choice::{
PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::Block as ProtoBlock;
pub use proto_array::{Block as ProtoBlock, InvalidationOperation};


@ -4,6 +4,7 @@ mod no_votes;
mod votes;
use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
use crate::InvalidationOperation;
use serde_derive::{Deserialize, Serialize};
use types::{
AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256,
@ -238,12 +239,22 @@ impl ForkChoiceTestDefinition {
Operation::InvalidatePayload {
head_block_root,
latest_valid_ancestor_root,
} => fork_choice
.process_execution_payload_invalidation(
} => {
let op = if let Some(latest_valid_ancestor) = latest_valid_ancestor_root {
InvalidationOperation::InvalidateMany {
head_block_root,
latest_valid_ancestor_root,
)
.unwrap(),
always_invalidate_head: true,
latest_valid_ancestor,
}
} else {
InvalidationOperation::InvalidateOne {
block_root: head_block_root,
}
};
fork_choice
.process_execution_payload_invalidation(&op)
.unwrap()
}
Operation::AssertWeight { block_root, weight } => assert_eq!(
fork_choice.get_weight(&block_root).unwrap(),
weight,


@ -4,6 +4,7 @@ mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
pub use crate::proto_array::InvalidationOperation;
pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
pub use error::Error;


@ -15,6 +15,56 @@ use types::{
four_byte_option_impl!(four_byte_option_usize, usize);
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
/// Defines an operation which may invalidate the `execution_status` of some nodes.
pub enum InvalidationOperation {
/// Invalidate only `block_root` and its descendants. Don't invalidate any ancestors.
InvalidateOne { block_root: Hash256 },
/// Invalidate blocks between `head_block_root` and `latest_valid_ancestor`.
///
/// If the `latest_valid_ancestor` is known to fork choice, invalidate all blocks between
/// `head_block_root` and `latest_valid_ancestor`. The `head_block_root` will be invalidated,
/// whilst the `latest_valid_ancestor` will not.
///
/// If `latest_valid_ancestor` is *not* known to fork choice, only invalidate the
/// `head_block_root` if `always_invalidate_head == true`.
InvalidateMany {
head_block_root: Hash256,
always_invalidate_head: bool,
latest_valid_ancestor: ExecutionBlockHash,
},
}
impl InvalidationOperation {
pub fn block_root(&self) -> Hash256 {
match self {
InvalidationOperation::InvalidateOne { block_root } => *block_root,
InvalidationOperation::InvalidateMany {
head_block_root, ..
} => *head_block_root,
}
}
pub fn latest_valid_ancestor(&self) -> Option<ExecutionBlockHash> {
match self {
InvalidationOperation::InvalidateOne { .. } => None,
InvalidationOperation::InvalidateMany {
latest_valid_ancestor,
..
} => Some(*latest_valid_ancestor),
}
}
pub fn invalidate_block_root(&self) -> bool {
match self {
InvalidationOperation::InvalidateOne { .. } => true,
InvalidationOperation::InvalidateMany {
always_invalidate_head,
..
} => *always_invalidate_head,
}
}
}
#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
pub struct ProtoNode {
/// The `slot` is not necessary for `ProtoArray`, it just exists so external components can
@ -328,43 +378,15 @@ impl ProtoArray {
}
}
/// Invalidate the relevant ancestors and descendants of a block with an invalid execution
/// payload.
/// Invalidate zero or more blocks, as specified by the `InvalidationOperation`.
///
/// The `head_block_root` should be the beacon block root of the block with the invalid
/// execution payload, _or_ its parent where the block with the invalid payload has not yet
/// been applied to `self`.
///
/// The `latest_valid_hash` should be the hash of most recent *valid* execution payload
/// contained in an ancestor block of `head_block_root`.
///
/// This function will invalidate:
///
/// * The block matching `head_block_root` _unless_ that block has a payload matching `latest_valid_hash`.
/// * All ancestors of `head_block_root` back to the block with payload matching
/// `latest_valid_hash` (endpoint > exclusive). In the case where the `head_block_root` is the parent
/// of the invalid block and itself matches `latest_valid_hash`, no ancestors will be invalidated.
/// * All descendants of `latest_valid_hash` if supplied and consistent with `head_block_root`,
/// or else all descendants of `head_block_root`.
///
/// ## Details
///
/// If `head_block_root` is not known to fork choice, an error is returned.
///
/// If `latest_valid_hash` is `Some(hash)` where `hash` is either not known to fork choice
/// (perhaps it's junk or pre-finalization), then only the `head_block_root` block will be
/// invalidated (no ancestors). No error will be returned in this case.
///
/// If `latest_valid_hash` is `Some(hash)` where `hash` is a known ancestor of
/// `head_block_root`, then all blocks between `head_block_root` and `latest_valid_hash` will
/// be invalidated. Additionally, all blocks that descend from a newly-invalidated block will
/// also be invalidated.
/// See the documentation of `InvalidationOperation` for usage.
pub fn propagate_execution_payload_invalidation(
&mut self,
head_block_root: Hash256,
latest_valid_ancestor_hash: Option<ExecutionBlockHash>,
op: &InvalidationOperation,
) -> Result<(), Error> {
let mut invalidated_indices: HashSet<usize> = <_>::default();
let head_block_root = op.block_root();
/*
* Step 1:
@ -379,7 +401,8 @@ impl ProtoArray {
.ok_or(Error::NodeUnknown(head_block_root))?;
// Try to map the ancestor payload *hash* to an ancestor beacon block *root*.
let latest_valid_ancestor_root = latest_valid_ancestor_hash
let latest_valid_ancestor_root = op
.latest_valid_ancestor()
.and_then(|hash| self.execution_block_hash_to_beacon_block_root(&hash));
// Set to `true` if both conditions are satisfied:
@ -414,7 +437,7 @@ impl ProtoArray {
// an invalid justified checkpoint.
if !latest_valid_ancestor_is_descendant && node.root != head_block_root {
break;
} else if Some(hash) == latest_valid_ancestor_hash {
} else if op.latest_valid_ancestor() == Some(hash) {
// If the `best_child` or `best_descendant` of the latest valid hash was
// invalidated, set those fields to `None`.
//
@ -444,6 +467,14 @@ impl ProtoArray {
ExecutionStatus::Irrelevant(_) => break,
}
// Only invalidate the head block if either:
//
// - The head block was specifically indicated to be invalidated.
// - The latest valid hash is a known ancestor.
if node.root != head_block_root
|| op.invalidate_block_root()
|| latest_valid_ancestor_is_descendant
{
match &node.execution_status {
// It's illegal for an execution client to declare that some previously-valid block
// is now invalid. This is a consensus failure on their behalf.
@ -454,6 +485,7 @@ impl ProtoArray {
})
}
ExecutionStatus::Unknown(hash) => {
invalidated_indices.insert(index);
node.execution_status = ExecutionStatus::Invalid(*hash);
// It's impossible for an invalid block to lead to a "best" block, so set these
@ -471,8 +503,7 @@ impl ProtoArray {
// ancestors.
ExecutionStatus::Irrelevant(_) => break,
}
invalidated_indices.insert(index);
}
if let Some(parent_index) = node.parent {
index = parent_index


@ -1,5 +1,5 @@
use crate::error::Error;
use crate::proto_array::{Iter, ProposerBoost, ProtoArray};
use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray};
use crate::ssz_container::SszContainer;
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
@ -191,11 +191,10 @@ impl ProtoArrayForkChoice {
/// See `ProtoArray::propagate_execution_payload_invalidation` for documentation.
pub fn process_execution_payload_invalidation(
&mut self,
head_block_root: Hash256,
latest_valid_ancestor_root: Option<ExecutionBlockHash>,
op: &InvalidationOperation,
) -> Result<(), String> {
self.proto_array
.propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root)
.propagate_execution_payload_invalidation(op)
.map_err(|e| format!("Failed to process invalid payload: {:?}", e))
}


@ -329,10 +329,10 @@ pub fn partially_verify_execution_payload<T: EthSpec>(
);
}
block_verify!(
payload.random == *state.get_randao_mix(state.current_epoch())?,
payload.prev_randao == *state.get_randao_mix(state.current_epoch())?,
BlockProcessingError::ExecutionRandaoMismatch {
expected: *state.get_randao_mix(state.current_epoch())?,
found: payload.random,
found: payload.prev_randao,
}
);
@ -368,7 +368,7 @@ pub fn process_execution_payload<T: EthSpec>(
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom.clone(),
random: payload.random,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,


@ -21,7 +21,7 @@ pub struct ExecutionPayload<T: EthSpec> {
pub receipts_root: Hash256,
#[serde(with = "ssz_types::serde_utils::hex_fixed_vec")]
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
pub random: Hash256,
pub prev_randao: Hash256,
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::quoted_u64")]


@ -15,7 +15,7 @@ pub struct ExecutionPayloadHeader<T: EthSpec> {
pub receipts_root: Hash256,
#[serde(with = "ssz_types::serde_utils::hex_fixed_vec")]
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
pub random: Hash256,
pub prev_randao: Hash256,
#[serde(with = "eth2_serde_utils::quoted_u64")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::quoted_u64")]


@ -23,7 +23,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
base_fee_per_gas,
timestamp: genesis_time,
block_hash: eth1_block_hash,
random: eth1_block_hash.into_root(),
prev_randao: eth1_block_hash.into_root(),
..ExecutionPayloadHeader::default()
};
let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?;


@ -1,4 +1,4 @@
TESTS_TAG := v1.1.9
TESTS_TAG := v1.1.10
TESTS = general minimal mainnet
TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))


@ -3,8 +3,6 @@ name = "execution_engine_integration"
version = "0.1.0"
edition = "2021"
build = "build.rs"
[dependencies]
tempfile = "3.1.0"
serde_json = "1.0.58"


@ -1,5 +1,5 @@
test:
cargo test --release --locked
cargo run --release --locked
clean:
rm -rf execution_clients


@ -3,10 +3,10 @@ use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
const GETH_BRANCH: &str = "merge-kiln";
const GETH_BRANCH: &str = "merge-kiln-v2";
const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum";
fn main() {
pub fn build() {
let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into();
let execution_clients_dir = manifest_dir.join("execution_clients");
@ -52,11 +52,15 @@ fn build_geth(execution_clients_dir: &Path) {
.success());
// Build geth
assert!(Command::new("make")
let make_result = Command::new("make")
.arg("geth")
.current_dir(&repo_dir)
.output()
.expect("failed to make geth")
.status
.success());
.expect("failed to make geth");
if !make_result.status.success() {
dbg!(String::from_utf8_lossy(&make_result.stdout));
dbg!(String::from_utf8_lossy(&make_result.stderr));
panic!("make failed");
}
}


@ -9,7 +9,7 @@ use unused_port::unused_tcp_port;
/// Defined for each EE type (e.g., Geth, Nethermind, etc).
pub trait GenericExecutionEngine: Clone {
fn init_datadir() -> TempDir;
fn start_client(datadir: &TempDir, http_port: u16) -> Child;
fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child;
}
/// Holds handle to a running EE process, plus some other metadata.
@ -19,6 +19,7 @@ pub struct ExecutionEngine<E> {
#[allow(dead_code)]
datadir: TempDir,
http_port: u16,
http_auth_port: u16,
child: Child,
}
@ -35,11 +36,13 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> {
pub fn new(engine: E) -> Self {
let datadir = E::init_datadir();
let http_port = unused_tcp_port().unwrap();
let child = E::start_client(&datadir, http_port);
let http_auth_port = unused_tcp_port().unwrap();
let child = E::start_client(&datadir, http_port, http_auth_port);
Self {
engine,
datadir,
http_port,
http_auth_port,
child,
}
}
@ -47,6 +50,11 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> {
pub fn http_url(&self) -> SensitiveUrl {
SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap()
}
#[allow(dead_code)] // Future use.
pub fn http_ath_url(&self) -> SensitiveUrl {
SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap()
}
}
/*
@ -90,7 +98,7 @@ impl GenericExecutionEngine for Geth {
datadir
}
fn start_client(datadir: &TempDir, http_port: u16) -> Child {
fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child {
let network_port = unused_tcp_port().unwrap();
Command::new(Self::binary_path())
@ -101,6 +109,8 @@ impl GenericExecutionEngine for Geth {
.arg("engine,eth")
.arg("--http.port")
.arg(http_port.to_string())
.arg("--http.authport")
.arg(http_auth_port.to_string())
.arg("--port")
.arg(network_port.to_string())
.stdout(build_stdio())


@ -1,12 +0,0 @@
/// This library provides integration testing between Lighthouse and other execution engines.
///
/// See the `tests/tests.rs` file to run tests.
mod execution_engine;
mod genesis_json;
mod test_rig;
pub use execution_engine::Geth;
pub use test_rig::TestRig;
/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
const SUPPRESS_LOGS: bool = true;


@ -0,0 +1,28 @@
/// This binary runs integration tests between Lighthouse and execution engines.
///
/// It will first attempt to build any supported integration clients, then it will run tests.
///
/// A return code of `0` indicates the tests succeeded.
mod build_geth;
mod execution_engine;
mod genesis_json;
mod test_rig;
use execution_engine::Geth;
use test_rig::TestRig;
/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
const SUPPRESS_LOGS: bool = false;
fn main() {
if cfg!(windows) {
panic!("windows is not supported, only linux");
}
test_geth()
}
fn test_geth() {
build_geth::build();
TestRig::new(Geth).perform_tests_blocking();
}


@ -138,7 +138,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
let parent_hash = terminal_pow_block_hash;
let timestamp = timestamp_now();
let random = Hash256::zero();
let prev_randao = Hash256::zero();
let finalized_block_hash = ExecutionBlockHash::zero();
let proposer_index = 0;
let valid_payload = self
@ -147,7 +147,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
.get_payload::<MainnetEthSpec>(
parent_hash,
timestamp,
random,
prev_randao,
finalized_block_hash,
proposer_index,
)
@ -210,7 +210,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
*/
let mut invalid_payload = valid_payload.clone();
invalid_payload.random = Hash256::from_low_u64_be(42);
invalid_payload.prev_randao = Hash256::from_low_u64_be(42);
let status = self
.ee_a
.execution_layer
@ -227,7 +227,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
let parent_hash = valid_payload.block_hash;
let timestamp = valid_payload.timestamp + 1;
let random = Hash256::zero();
let prev_randao = Hash256::zero();
let finalized_block_hash = ExecutionBlockHash::zero();
let proposer_index = 0;
let second_payload = self
@ -236,7 +236,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
.get_payload::<MainnetEthSpec>(
parent_hash,
timestamp,
random,
prev_randao,
finalized_block_hash,
proposer_index,
)
@ -266,7 +266,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
let finalized_block_hash = ExecutionBlockHash::zero();
let payload_attributes = Some(PayloadAttributes {
timestamp: second_payload.timestamp + 1,
random: Hash256::zero(),
prev_randao: Hash256::zero(),
suggested_fee_recipient: Address::zero(),
});
let status = self


@ -1,16 +0,0 @@
#[cfg(not(target_family = "windows"))]
mod not_windows {
use execution_engine_integration::{Geth, TestRig};
#[test]
fn geth() {
TestRig::new(Geth).perform_tests_blocking()
}
}
#[cfg(target_family = "windows")]
mod windows {
#[test]
fn all_tests_skipped_on_windows() {
//
}
}