Rename random to prev_randao (#3040)
## Issue Addressed

As discussed on last night's consensus call, the testnets next week will target the [Kiln Spec v2](https://hackmd.io/@n0ble/kiln-spec).

Presently, we support Kiln V1. V2 is backwards compatible, except for renaming `random` to `prev_randao` in:

- https://github.com/ethereum/execution-apis/pull/180
- https://github.com/ethereum/consensus-specs/pull/2835

With this PR we'll no longer be compatible with the existing Kintsugi and Kiln testnets; however, we'll be ready for the testnets next week. I raised this breaking change on the call last night, and we are all keen to move forward and break things.

We now target the [`merge-kiln-v2`](https://github.com/MariusVanDerWijden/go-ethereum/tree/merge-kiln-v2) branch for interop with Geth. This required adding the `--http.aauthport` flag to the tester to avoid a port conflict at startup.

### Changes to exec integration tests

There's a change in the `merge-kiln-v2` version of Geth that means it can't compile on a vanilla GitHub runner; bumping the `go` version on the runner solved the issue. Whilst addressing this, I refactored the `testing/execution_integration` crate to be a *binary* rather than a *library* with tests. This means that we don't need to run the `build.rs` and build Geth whenever someone runs `make lint` or `make test-release`. This is nice for everyday users, but it's also nice for CI, since we can have a specific runner for these tests and we don't need to ensure that *all* runners support everything required to build all execution clients.

## More Info

- [x] ~~EF tests are failing since the rename has broken some tests that reference the old field name. I have been told there will be new tests released in the coming days (25/02/22 or 26/02/22).~~
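For illustration, here is a minimal sketch of why the rename is a wire-level breaking change. This is not Lighthouse's actual type (the struct name here is hypothetical); it only shows how serde's camelCase renaming turns the new `prev_randao` field into the `prevRandao` JSON key that Kiln V2 engines expect, where V1 engines still expect `random`. Assumes `serde` (with the `derive` feature) and `serde_json` as dependencies.

```rust
// Hypothetical sketch only: Lighthouse's real payload-attributes types live in
// the execution_layer crate. This demonstrates the JSON key produced by the
// `random` -> `prev_randao` rename.
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct PayloadAttributesSketch {
    timestamp: String,
    prev_randao: String,
    suggested_fee_recipient: String,
}

fn main() {
    let attrs = PayloadAttributesSketch {
        timestamp: "0x5".into(),
        prev_randao: "0x0000000000000000000000000000000000000000000000000000000000000000".into(),
        suggested_fee_recipient: "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b".into(),
    };
    // Prints a JSON object containing "prevRandao" (Kiln V2) rather than "random" (Kiln V1).
    println!("{}", serde_json::to_string_pretty(&attrs).unwrap());
}
```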
This commit is contained in:

parent 4bf1af4e85
commit aea43b626b

.github/workflows/test-suite.yml (vendored, 3 changes)
@@ -190,6 +190,9 @@ jobs:
     needs: cargo-fmt
     steps:
       - uses: actions/checkout@v1
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '1.17'
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Run exec engine integration tests in release
@@ -53,7 +53,7 @@ use eth2::types::{
     EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty,
 };
 use execution_layer::{ExecutionLayer, PayloadStatus};
-use fork_choice::{AttestationFromBlock, ForkChoice};
+use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation};
 use futures::channel::mpsc::Sender;
 use itertools::process_results;
 use itertools::Itertools;
@@ -3180,49 +3180,29 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// This method must be called whenever an execution engine indicates that a payload is
     /// invalid.
     ///
-    /// If the `latest_root` is known to fork-choice it will be invalidated. If it is not known, an
-    /// error will be returned.
+    /// Fork choice will be run after the invalidation. The client may be shut down if the `op`
+    /// results in the justified checkpoint being invalidated.
     ///
-    /// If `latest_valid_hash` is `None` or references a block unknown to fork choice, no other
-    /// blocks will be invalidated. If `latest_valid_hash` is a block known to fork choice, all
-    /// blocks between the `latest_root` and the `latest_valid_hash` will be invalidated (which may
-    /// cause further, second-order invalidations).
-    ///
-    /// ## Notes
-    ///
-    /// Use these rules to set `latest_root`:
-    ///
-    /// - When `forkchoiceUpdated` indicates an invalid block, set `latest_root` to be the
-    ///   block root that was the head of the chain when `forkchoiceUpdated` was called.
-    /// - When `executePayload` returns an invalid block *during* block import, set
-    ///   `latest_root` to be the parent of the beacon block containing the invalid
-    ///   payload (because the block containing the payload is not present in fork choice).
-    /// - When `executePayload` returns an invalid block *after* block import, set
-    ///   `latest_root` to be root of the beacon block containing the invalid payload.
+    /// See the documentation of `InvalidationOperation` for information about defining `op`.
     pub fn process_invalid_execution_payload(
         &self,
-        latest_root: Hash256,
-        latest_valid_hash: Option<ExecutionBlockHash>,
+        op: &InvalidationOperation,
     ) -> Result<(), Error> {
         debug!(
             self.log,
             "Invalid execution payload in block";
-            "latest_valid_hash" => ?latest_valid_hash,
-            "latest_root" => ?latest_root,
+            "latest_valid_ancestor" => ?op.latest_valid_ancestor(),
+            "block_root" => ?op.block_root(),
         );
 
         // Update fork choice.
-        if let Err(e) = self
-            .fork_choice
-            .write()
-            .on_invalid_execution_payload(latest_root, latest_valid_hash)
-        {
+        if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) {
             crit!(
                 self.log,
                 "Failed to process invalid payload";
                 "error" => ?e,
-                "latest_valid_hash" => ?latest_valid_hash,
-                "latest_root" => ?latest_root,
+                "latest_valid_ancestor" => ?op.latest_valid_ancestor(),
+                "block_root" => ?op.block_root(),
             );
         }
@@ -3763,8 +3743,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             // The execution engine has stated that all blocks between the
             // `head_execution_block_hash` and `latest_valid_hash` are invalid.
             self.process_invalid_execution_payload(
-                head_block_root,
-                Some(*latest_valid_hash),
+                &InvalidationOperation::InvalidateMany {
+                    head_block_root,
+                    always_invalidate_head: true,
+                    latest_valid_ancestor: *latest_valid_hash,
+                },
             )?;
 
             Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
@@ -3781,7 +3764,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 //
                 // Using a `None` latest valid ancestor will result in only the head block
                 // being invalidated (no ancestors).
-                self.process_invalid_execution_payload(head_block_root, None)?;
+                self.process_invalid_execution_payload(
+                    &InvalidationOperation::InvalidateOne {
+                        block_root: head_block_root,
+                    },
+                )?;
 
                 Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
             }
@@ -12,7 +12,7 @@ use crate::{
     ExecutionPayloadError,
 };
 use execution_layer::PayloadStatus;
-use fork_choice::PayloadVerificationStatus;
+use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
 use proto_array::{Block as ProtoBlock, ExecutionStatus};
 use slog::debug;
 use slot_clock::SlotClock;
@@ -68,7 +68,13 @@ pub fn notify_new_payload<T: BeaconChainTypes>(
             // This block has not yet been applied to fork choice, so the latest block that was
             // imported to fork choice was the parent.
             let latest_root = block.parent_root();
-            chain.process_invalid_execution_payload(latest_root, Some(latest_valid_hash))?;
+            chain.process_invalid_execution_payload(
+                &InvalidationOperation::InvalidateMany {
+                    head_block_root: latest_root,
+                    always_invalidate_head: false,
+                    latest_valid_ancestor: latest_valid_hash,
+                },
+            )?;
 
             Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
         }
@@ -145,11 +151,19 @@ pub fn validate_merge_block<T: BeaconChainTypes>(
         .slot_clock
         .now()
         .ok_or(BeaconChainError::UnableToReadSlot)?;
-    // Check the optimistic sync conditions. Note that because this is the merge block,
-    // the justified checkpoint can't have execution enabled so we only need to check the
-    // current slot is at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the block
-    // https://github.com/ethereum/consensus-specs/blob/v1.1.9/sync/optimistic.md#when-to-optimistically-import-blocks
-    if block.slot() + chain.spec.safe_slots_to_import_optimistically <= current_slot {
+
+    // Ensure the block is a candidate for optimistic import.
+    if chain
+        .fork_choice
+        .read()
+        .is_optimistic_candidate_block(
+            current_slot,
+            block.slot(),
+            &block.parent_root(),
+            &chain.spec,
+        )
+        .map_err(BeaconChainError::from)?
+    {
         debug!(
             chain.log,
             "Optimistically accepting terminal block";
@@ -231,8 +231,10 @@ fn valid_invalid_syncing() {
 /// `latest_valid_hash`.
 #[test]
 fn invalid_payload_invalidates_parent() {
-    let mut rig = InvalidPayloadRig::new();
+    let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
+    rig.move_to_first_justification(Payload::Syncing);
 
     let roots = vec![
         rig.import_block(Payload::Syncing),
@@ -258,6 +260,7 @@ fn invalid_payload_invalidates_parent() {
 fn justified_checkpoint_becomes_invalid() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
     rig.move_to_first_justification(Payload::Syncing);
 
     let justified_checkpoint = rig.head_info().current_justified_checkpoint;
@@ -305,7 +308,9 @@ fn pre_finalized_latest_valid_hash() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
-    let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
+    let mut blocks = vec![];
+    blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
+    blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing));
 
     assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
 
@@ -330,7 +335,11 @@ fn pre_finalized_latest_valid_hash() {
     for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
         let slot = Slot::new(i);
         let root = rig.block_root_at_slot(slot).unwrap();
-        assert!(rig.execution_status(root).is_not_verified());
+        if slot == 1 {
+            assert!(rig.execution_status(root).is_valid());
+        } else {
+            assert!(rig.execution_status(root).is_not_verified());
+        }
     }
 }
 
@@ -344,7 +353,10 @@ fn latest_valid_hash_will_validate() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
-    let blocks = rig.build_blocks(4, Payload::Syncing);
+
+    let mut blocks = vec![];
+    blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
+    blocks.extend(rig.build_blocks(4, Payload::Syncing));
 
     let latest_valid_root = rig
         .block_root_at_slot(Slot::new(LATEST_VALID_SLOT))
@@ -357,7 +369,7 @@ fn latest_valid_hash_will_validate() {
     assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT);
 
-    for slot in 0..=4 {
+    for slot in 0..=5 {
         let slot = Slot::new(slot);
         let root = if slot > 0 {
             // If not the genesis slot, check the blocks we just produced.
@@ -386,7 +398,9 @@ fn latest_valid_hash_is_junk() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
-    let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
+    let mut blocks = vec![];
+    blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block.
+    blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing));
 
     assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
 
@@ -408,7 +422,11 @@ fn latest_valid_hash_is_junk() {
     for i in E::slots_per_epoch() * finalized_epoch..num_blocks {
         let slot = Slot::new(i);
         let root = rig.block_root_at_slot(slot).unwrap();
-        assert!(rig.execution_status(root).is_not_verified());
+        if slot == 1 {
+            assert!(rig.execution_status(root).is_valid());
+        } else {
+            assert!(rig.execution_status(root).is_not_verified());
+        }
     }
 }
 
@@ -421,6 +439,7 @@ fn invalidates_all_descendants() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
     let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
 
     assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@@ -493,6 +512,7 @@ fn switches_heads() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
     let blocks = rig.build_blocks(num_blocks, Payload::Syncing);
 
     assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch);
@@ -571,8 +591,9 @@ fn invalid_during_processing() {
 #[test]
 fn invalid_after_optimistic_sync() {
-    let mut rig = InvalidPayloadRig::new();
+    let mut rig = InvalidPayloadRig::new().enable_attestations();
     rig.move_to_terminal_block();
+    rig.import_block(Payload::Valid); // Import a valid transition block.
 
     let mut roots = vec![
         rig.import_block(Payload::Syncing),
@@ -110,7 +110,7 @@ pub struct ExecutionBlock {
 #[derive(Clone, Copy, Debug)]
 pub struct PayloadAttributes {
     pub timestamp: u64,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     pub suggested_fee_recipient: Address,
 }
@@ -288,7 +288,7 @@ mod test {
-            "random": HASH_01,
+            "prevRandao": HASH_01,
@@ -441,7 +441,7 @@ mod test {
-                random: Hash256::zero(),
+                prev_randao: Hash256::zero(),
@@ -458,7 +458,7 @@ mod test {
-            "random": HASH_00,
+            "prevRandao": HASH_00,
@@ -495,7 +495,7 @@ mod test {
-            random: Hash256::repeat_byte(1),
+            prev_randao: Hash256::repeat_byte(1),
@@ -517,7 +517,7 @@ mod test {
-            "random": HASH_01,
+            "prevRandao": HASH_01,
@@ -596,7 +596,7 @@ mod test {
-                random: Hash256::zero(),
+                prev_randao: Hash256::zero(),
@@ -613,7 +613,7 @@ mod test {
-            "random": HASH_00,
+            "prevRandao": HASH_00,
@@ -643,7 +643,7 @@ mod test {
-                random: Hash256::zero(),
+                prev_randao: Hash256::zero(),
@@ -687,7 +687,7 @@ mod test {
-            "random": HASH_00,
+            "prevRandao": HASH_00,
@@ -710,7 +710,7 @@ mod test {
-            random: Hash256::zero(),
+            prev_randao: Hash256::zero(),
@@ -735,7 +735,7 @@ mod test {
-            random: Hash256::zero(),
+            prev_randao: Hash256::zero(),
@@ -757,7 +757,7 @@ mod test {
-            "random": HASH_00,
+            "prevRandao": HASH_00,
@@ -64,7 +64,7 @@ pub struct JsonExecutionPayloadV1<T: EthSpec> {
     #[serde(with = "serde_logs_bloom")]
     pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     #[serde(with = "eth2_serde_utils::u64_hex_be")]
     pub block_number: u64,
@@ -91,7 +91,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayloadV1<T> {
-            random,
+            prev_randao,
@@ -108,7 +108,7 @@ impl<T: EthSpec> From<ExecutionPayload<T>> for JsonExecutionPayloadV1<T> {
-            random,
+            prev_randao,
@@ -130,7 +130,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
-            random,
+            prev_randao,
@@ -147,7 +147,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
-            random,
+            prev_randao,
@@ -165,7 +165,7 @@ impl<T: EthSpec> From<JsonExecutionPayloadV1<T>> for ExecutionPayload<T> {
 pub struct JsonPayloadAttributesV1 {
     #[serde(with = "eth2_serde_utils::u64_hex_be")]
     pub timestamp: u64,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     pub suggested_fee_recipient: Address,
 }
@@ -174,13 +174,13 @@ impl From<PayloadAttributes> for JsonPayloadAttributesV1 {
         // Use this verbose deconstruction pattern to ensure no field is left unused.
         let PayloadAttributes {
             timestamp,
-            random,
+            prev_randao,
             suggested_fee_recipient,
         } = p;
 
         Self {
             timestamp,
-            random,
+            prev_randao,
             suggested_fee_recipient,
         }
@@ -191,13 +191,13 @@ impl From<JsonPayloadAttributesV1> for PayloadAttributes {
         // Use this verbose deconstruction pattern to ensure no field is left unused.
         let JsonPayloadAttributesV1 {
             timestamp,
-            random,
+            prev_randao,
             suggested_fee_recipient,
         } = j;
 
         Self {
             timestamp,
-            random,
+            prev_randao,
             suggested_fee_recipient,
         }
@@ -50,7 +50,7 @@ impl Logging {
 struct PayloadIdCacheKey {
     pub head_block_hash: ExecutionBlockHash,
     pub timestamp: u64,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     pub suggested_fee_recipient: Address,
 }
@@ -77,7 +77,7 @@ impl<T> Engine<T> {
         &self,
         head_block_hash: ExecutionBlockHash,
         timestamp: u64,
-        random: Hash256,
+        prev_randao: Hash256,
         suggested_fee_recipient: Address,
     ) -> Option<PayloadId> {
         self.payload_id_cache
@@ -86,7 +86,7 @@ impl<T> Engine<T> {
             .get(&PayloadIdCacheKey {
                 head_block_hash,
                 timestamp,
-                random,
+                prev_randao,
                 suggested_fee_recipient,
             })
             .cloned()
@@ -393,7 +393,7 @@ impl PayloadIdCacheKey {
         Self {
             head_block_hash: state.head_block_hash,
             timestamp: attributes.timestamp,
-            random: attributes.random,
+            prev_randao: attributes.prev_randao,
             suggested_fee_recipient: attributes.suggested_fee_recipient,
         }
     }
@@ -392,7 +392,7 @@ impl ExecutionLayer {
         &self,
         parent_hash: ExecutionBlockHash,
         timestamp: u64,
-        random: Hash256,
+        prev_randao: Hash256,
         finalized_block_hash: ExecutionBlockHash,
         proposer_index: u64,
     ) -> Result<ExecutionPayload<T>, Error> {
@@ -402,14 +402,14 @@ impl ExecutionLayer {
             self.log(),
             "Issuing engine_getPayload";
             "suggested_fee_recipient" => ?suggested_fee_recipient,
-            "random" => ?random,
+            "prev_randao" => ?prev_randao,
             "timestamp" => timestamp,
             "parent_hash" => ?parent_hash,
         );
         self.engines()
             .first_success(|engine| async move {
                 let payload_id = if let Some(id) = engine
-                    .get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient)
+                    .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient)
                     .await
                 {
                     // The payload id has been cached for this engine.
@@ -428,7 +428,7 @@ impl ExecutionLayer {
                 };
                 let payload_attributes = PayloadAttributes {
                     timestamp,
-                    random,
+                    prev_randao,
                     suggested_fee_recipient,
                 };
@@ -326,7 +326,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             receipts_root: Hash256::repeat_byte(42),
             state_root: Hash256::repeat_byte(43),
             logs_bloom: vec![0; 256].into(),
-            random: attributes.random,
+            prev_randao: attributes.prev_randao,
             block_number: parent.block_number() + 1,
             gas_limit: GAS_LIMIT,
             gas_used: GAS_USED,
@@ -111,7 +111,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
         let parent_hash = latest_execution_block.block_hash();
         let block_number = latest_execution_block.block_number() + 1;
         let timestamp = block_number;
-        let random = Hash256::from_low_u64_be(block_number);
+        let prev_randao = Hash256::from_low_u64_be(block_number);
         let finalized_block_hash = parent_hash;
 
         self.el
@@ -120,7 +120,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
                 ExecutionBlockHash::zero(),
                 Some(PayloadAttributes {
                     timestamp,
-                    random,
+                    prev_randao,
                     suggested_fee_recipient: Address::repeat_byte(42),
                 }),
             )
@@ -133,7 +133,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
             .get_payload::<T>(
                 parent_hash,
                 timestamp,
-                random,
+                prev_randao,
                 finalized_block_hash,
                 validator_index,
             )
@@ -143,7 +143,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
         assert_eq!(payload.parent_hash, parent_hash);
         assert_eq!(payload.block_number, block_number);
         assert_eq!(payload.timestamp, timestamp);
-        assert_eq!(payload.random, random);
+        assert_eq!(payload.prev_randao, prev_randao);
 
         let status = self.el.notify_new_payload(&payload).await.unwrap();
         assert_eq!(status, PayloadStatus::Valid);
@@ -13,7 +13,12 @@ status = [
    "clippy",
    "arbitrary-check",
    "cargo-audit",
-   "cargo-udeps"
+   "cargo-udeps",
+   "beacon-chain-tests",
+   "op-pool-tests",
+   "doppelganger-protection-test",
+   "execution-engine-integration-ubuntu",
+   "cargo-vendor"
 ]
 use_squash_merge = true
 timeout_sec = 10800
@@ -1,4 +1,4 @@
-use crate::ForkChoiceStore;
+use crate::{ForkChoiceStore, InvalidationOperation};
 use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice};
 use ssz_derive::{Decode, Encode};
 use std::cmp::Ordering;
@@ -480,11 +480,10 @@ where
     /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation.
     pub fn on_invalid_execution_payload(
         &mut self,
-        head_block_root: Hash256,
-        latest_valid_ancestor_root: Option<ExecutionBlockHash>,
+        op: &InvalidationOperation,
     ) -> Result<(), Error<T::Error>> {
         self.proto_array
-            .process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root)
+            .process_execution_payload_invalidation(op)
             .map_err(Error::FailedToProcessInvalidExecutionPayload)
     }
 
@@ -956,19 +955,19 @@ where
             return Ok(true);
         }
 
-        // If the block has an ancestor with a verified parent, import this block.
+        // If the parent block has execution enabled, always import the block.
         //
-        // TODO: This condition is not yet merged into the spec. See:
+        // TODO(bellatrix): this condition has not yet been merged into the spec.
         //
-        // https://github.com/ethereum/consensus-specs/pull/2841
+        // See:
         //
-        // ## Note
-        //
-        // If `block_parent_root` is unknown this iter will always return `None`.
+        // https://github.com/ethereum/consensus-specs/pull/2844
         if self
             .proto_array
-            .iter_nodes(block_parent_root)
-            .any(|node| node.execution_status.is_valid())
+            .get_block(block_parent_root)
+            .map_or(false, |parent| {
+                parent.execution_status.is_execution_enabled()
+            })
         {
             return Ok(true);
         }
@@ -6,4 +6,4 @@ pub use crate::fork_choice::{
     PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation,
 };
 pub use fork_choice_store::ForkChoiceStore;
-pub use proto_array::Block as ProtoBlock;
+pub use proto_array::{Block as ProtoBlock, InvalidationOperation};
@@ -4,6 +4,7 @@ mod no_votes;
 mod votes;
 
 use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
+use crate::InvalidationOperation;
 use serde_derive::{Deserialize, Serialize};
 use types::{
     AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256,
@@ -238,12 +239,22 @@ impl ForkChoiceTestDefinition {
             Operation::InvalidatePayload {
                 head_block_root,
                 latest_valid_ancestor_root,
-            } => fork_choice
-                .process_execution_payload_invalidation(
-                    head_block_root,
-                    latest_valid_ancestor_root,
-                )
-                .unwrap(),
+            } => {
+                let op = if let Some(latest_valid_ancestor) = latest_valid_ancestor_root {
+                    InvalidationOperation::InvalidateMany {
+                        head_block_root,
+                        always_invalidate_head: true,
+                        latest_valid_ancestor,
+                    }
+                } else {
+                    InvalidationOperation::InvalidateOne {
+                        block_root: head_block_root,
+                    }
+                };
+                fork_choice
+                    .process_execution_payload_invalidation(&op)
+                    .unwrap()
+            }
             Operation::AssertWeight { block_root, weight } => assert_eq!(
                 fork_choice.get_weight(&block_root).unwrap(),
                 weight,
@@ -4,6 +4,7 @@ mod proto_array;
 mod proto_array_fork_choice;
 mod ssz_container;
 
+pub use crate::proto_array::InvalidationOperation;
 pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
 pub use error::Error;
 
@@ -15,6 +15,56 @@ use types::{
 four_byte_option_impl!(four_byte_option_usize, usize);
 four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
 
+/// Defines an operation which may invalidate the `execution_status` of some nodes.
+pub enum InvalidationOperation {
+    /// Invalidate only `block_root` and it's descendants. Don't invalidate any ancestors.
+    InvalidateOne { block_root: Hash256 },
+    /// Invalidate blocks between `head_block_root` and `latest_valid_ancestor`.
+    ///
+    /// If the `latest_valid_ancestor` is known to fork choice, invalidate all blocks between
+    /// `head_block_root` and `latest_valid_ancestor`. The `head_block_root` will be invalidated,
+    /// whilst the `latest_valid_ancestor` will not.
+    ///
+    /// If `latest_valid_ancestor` is *not* known to fork choice, only invalidate the
+    /// `head_block_root` if `always_invalidate_head == true`.
+    InvalidateMany {
+        head_block_root: Hash256,
+        always_invalidate_head: bool,
+        latest_valid_ancestor: ExecutionBlockHash,
+    },
+}
+
+impl InvalidationOperation {
+    pub fn block_root(&self) -> Hash256 {
+        match self {
+            InvalidationOperation::InvalidateOne { block_root } => *block_root,
+            InvalidationOperation::InvalidateMany {
+                head_block_root, ..
+            } => *head_block_root,
+        }
+    }
+
+    pub fn latest_valid_ancestor(&self) -> Option<ExecutionBlockHash> {
+        match self {
+            InvalidationOperation::InvalidateOne { .. } => None,
+            InvalidationOperation::InvalidateMany {
+                latest_valid_ancestor,
+                ..
+            } => Some(*latest_valid_ancestor),
+        }
+    }
+
+    pub fn invalidate_block_root(&self) -> bool {
+        match self {
+            InvalidationOperation::InvalidateOne { .. } => true,
+            InvalidationOperation::InvalidateMany {
+                always_invalidate_head,
+                ..
+            } => *always_invalidate_head,
+        }
+    }
+}
+
 #[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
 pub struct ProtoNode {
     /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can
@@ -328,43 +378,15 @@ impl ProtoArray {
         }
     }
 
-    /// Invalidate the relevant ancestors and descendants of a block with an invalid execution
-    /// payload.
-    ///
-    /// The `head_block_root` should be the beacon block root of the block with the invalid
-    /// execution payload, _or_ its parent where the block with the invalid payload has not yet
-    /// been applied to `self`.
-    ///
-    /// The `latest_valid_hash` should be the hash of most recent *valid* execution payload
-    /// contained in an ancestor block of `head_block_root`.
-    ///
-    /// This function will invalidate:
-    ///
-    /// * The block matching `head_block_root` _unless_ that block has a payload matching `latest_valid_hash`.
-    /// * All ancestors of `head_block_root` back to the block with payload matching
-    ///   `latest_valid_hash` (endpoint > exclusive). In the case where the `head_block_root` is the parent
-    ///   of the invalid block and itself matches `latest_valid_hash`, no ancestors will be invalidated.
-    /// * All descendants of `latest_valid_hash` if supplied and consistent with `head_block_root`,
-    ///   or else all descendants of `head_block_root`.
-    ///
-    /// ## Details
-    ///
-    /// If `head_block_root` is not known to fork choice, an error is returned.
-    ///
-    /// If `latest_valid_hash` is `Some(hash)` where `hash` is either not known to fork choice
-    /// (perhaps it's junk or pre-finalization), then only the `head_block_root` block will be
-    /// invalidated (no ancestors). No error will be returned in this case.
-    ///
-    /// If `latest_valid_hash` is `Some(hash)` where `hash` is a known ancestor of
-    /// `head_block_root`, then all blocks between `head_block_root` and `latest_valid_hash` will
-    /// be invalidated. Additionally, all blocks that descend from a newly-invalidated block will
-    /// also be invalidated.
+    /// Invalidate zero or more blocks, as specified by the `InvalidationOperation`.
+    ///
+    /// See the documentation of `InvalidationOperation` for usage.
     pub fn propagate_execution_payload_invalidation(
         &mut self,
-        head_block_root: Hash256,
-        latest_valid_ancestor_hash: Option<ExecutionBlockHash>,
+        op: &InvalidationOperation,
     ) -> Result<(), Error> {
         let mut invalidated_indices: HashSet<usize> = <_>::default();
+        let head_block_root = op.block_root();
 
         /*
          * Step 1:
@@ -379,7 +401,8 @@ impl ProtoArray {
             .ok_or(Error::NodeUnknown(head_block_root))?;
 
         // Try to map the ancestor payload *hash* to an ancestor beacon block *root*.
-        let latest_valid_ancestor_root = latest_valid_ancestor_hash
+        let latest_valid_ancestor_root = op
+            .latest_valid_ancestor()
             .and_then(|hash| self.execution_block_hash_to_beacon_block_root(&hash));
 
         // Set to `true` if both conditions are satisfied:
@@ -414,7 +437,7 @@ impl ProtoArray {
             // an invalid justified checkpoint.
             if !latest_valid_ancestor_is_descendant && node.root != head_block_root {
                 break;
-            } else if Some(hash) == latest_valid_ancestor_hash {
+            } else if op.latest_valid_ancestor() == Some(hash) {
                 // If the `best_child` or `best_descendant` of the latest valid hash was
                 // invalidated, set those fields to `None`.
                 //
|
|||||||
ExecutionStatus::Irrelevant(_) => break,
|
ExecutionStatus::Irrelevant(_) => break,
|
||||||
}
|
}
|
||||||
|
|
||||||
match &node.execution_status {
|
// Only invalidate the head block if either:
|
||||||
// It's illegal for an execution client to declare that some previously-valid block
|
//
|
||||||
// is now invalid. This is a consensus failure on their behalf.
|
// - The head block was specifically indicated to be invalidated.
|
||||||
ExecutionStatus::Valid(hash) => {
|
// - The latest valid hash is a known ancestor.
|
||||||
return Err(Error::ValidExecutionStatusBecameInvalid {
|
if node.root != head_block_root
|
||||||
block_root: node.root,
|
|| op.invalidate_block_root()
|
||||||
payload_block_hash: *hash,
|
|| latest_valid_ancestor_is_descendant
|
||||||
})
|
{
|
||||||
}
|
match &node.execution_status {
|
||||||
ExecutionStatus::Unknown(hash) => {
|
// It's illegal for an execution client to declare that some previously-valid block
|
||||||
node.execution_status = ExecutionStatus::Invalid(*hash);
|
// is now invalid. This is a consensus failure on their behalf.
|
||||||
|
ExecutionStatus::Valid(hash) => {
|
||||||
|
return Err(Error::ValidExecutionStatusBecameInvalid {
|
||||||
|
block_root: node.root,
|
||||||
|
payload_block_hash: *hash,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
ExecutionStatus::Unknown(hash) => {
|
||||||
|
invalidated_indices.insert(index);
|
||||||
|
node.execution_status = ExecutionStatus::Invalid(*hash);
|
||||||
|
|
||||||
// It's impossible for an invalid block to lead to a "best" block, so set these
|
// It's impossible for an invalid block to lead to a "best" block, so set these
|
||||||
// fields to `None`.
|
// fields to `None`.
|
||||||
//
|
//
|
||||||
// Failing to set these values will result in `Self::node_leads_to_viable_head`
|
// Failing to set these values will result in `Self::node_leads_to_viable_head`
|
||||||
// returning `false` for *valid* ancestors of invalid blocks.
|
// returning `false` for *valid* ancestors of invalid blocks.
|
||||||
node.best_child = None;
|
node.best_child = None;
|
||||||
node.best_descendant = None;
|
node.best_descendant = None;
|
||||||
|
}
|
||||||
|
// The block is already invalid, but keep going backwards to ensure all ancestors
|
||||||
|
// are updated.
|
||||||
|
ExecutionStatus::Invalid(_) => (),
|
||||||
|
// This block is pre-merge, therefore it has no execution status. Nor do its
|
||||||
|
// ancestors.
|
||||||
|
ExecutionStatus::Irrelevant(_) => break,
|
||||||
}
|
}
|
||||||
// The block is already invalid, but keep going backwards to ensure all ancestors
|
|
||||||
// are updated.
|
|
||||||
ExecutionStatus::Invalid(_) => (),
|
|
||||||
// This block is pre-merge, therefore it has no execution status. Nor do its
|
|
||||||
// ancestors.
|
|
||||||
ExecutionStatus::Irrelevant(_) => break,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
invalidated_indices.insert(index);
|
|
||||||
|
|
||||||
if let Some(parent_index) = node.parent {
|
if let Some(parent_index) = node.parent {
|
||||||
index = parent_index
|
index = parent_index
|
||||||
} else {
|
} else {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
use crate::error::Error;
|
use crate::error::Error;
|
||||||
use crate::proto_array::{Iter, ProposerBoost, ProtoArray};
|
use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray};
|
||||||
use crate::ssz_container::SszContainer;
|
use crate::ssz_container::SszContainer;
|
||||||
use serde_derive::{Deserialize, Serialize};
|
use serde_derive::{Deserialize, Serialize};
|
||||||
use ssz::{Decode, Encode};
|
use ssz::{Decode, Encode};
|
||||||
@ -191,11 +191,10 @@ impl ProtoArrayForkChoice {
|
|||||||
/// See `ProtoArray::propagate_execution_payload_invalidation` for documentation.
|
/// See `ProtoArray::propagate_execution_payload_invalidation` for documentation.
|
||||||
pub fn process_execution_payload_invalidation(
|
pub fn process_execution_payload_invalidation(
|
||||||
&mut self,
|
&mut self,
|
||||||
head_block_root: Hash256,
|
op: &InvalidationOperation,
|
||||||
latest_valid_ancestor_root: Option<ExecutionBlockHash>,
|
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
self.proto_array
|
self.proto_array
|
||||||
.propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root)
|
.propagate_execution_payload_invalidation(op)
|
||||||
.map_err(|e| format!("Failed to process invalid payload: {:?}", e))
|
.map_err(|e| format!("Failed to process invalid payload: {:?}", e))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -329,10 +329,10 @@ pub fn partially_verify_execution_payload<T: EthSpec>(
         );
     }
     block_verify!(
-        payload.random == *state.get_randao_mix(state.current_epoch())?,
+        payload.prev_randao == *state.get_randao_mix(state.current_epoch())?,
         BlockProcessingError::ExecutionRandaoMismatch {
             expected: *state.get_randao_mix(state.current_epoch())?,
-            found: payload.random,
+            found: payload.prev_randao,
         }
     );
 
@@ -368,7 +368,7 @@ pub fn process_execution_payload<T: EthSpec>(
         state_root: payload.state_root,
         receipts_root: payload.receipts_root,
         logs_bloom: payload.logs_bloom.clone(),
-        random: payload.random,
+        prev_randao: payload.prev_randao,
         block_number: payload.block_number,
         gas_limit: payload.gas_limit,
         gas_used: payload.gas_used,
@@ -21,7 +21,7 @@ pub struct ExecutionPayload<T: EthSpec> {
     pub receipts_root: Hash256,
     #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")]
     pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub block_number: u64,
     #[serde(with = "eth2_serde_utils::quoted_u64")]
@@ -15,7 +15,7 @@ pub struct ExecutionPayloadHeader<T: EthSpec> {
     pub receipts_root: Hash256,
     #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")]
     pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
-    pub random: Hash256,
+    pub prev_randao: Hash256,
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     pub block_number: u64,
     #[serde(with = "eth2_serde_utils::quoted_u64")]
@@ -23,7 +23,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         base_fee_per_gas,
         timestamp: genesis_time,
         block_hash: eth1_block_hash,
-        random: eth1_block_hash.into_root(),
+        prev_randao: eth1_block_hash.into_root(),
         ..ExecutionPayloadHeader::default()
     };
     let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?;
@@ -1,4 +1,4 @@
-TESTS_TAG := v1.1.9
+TESTS_TAG := v1.1.10
 TESTS = general minimal mainnet
 TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))
 
@@ -3,8 +3,6 @@ name = "execution_engine_integration"
 version = "0.1.0"
 edition = "2021"
 
-build = "build.rs"
-
 [dependencies]
 tempfile = "3.1.0"
 serde_json = "1.0.58"
@@ -1,5 +1,5 @@
 test:
-	cargo test --release --locked
+	cargo run --release --locked
 
 clean:
 	rm -rf execution_clients
@@ -3,10 +3,10 @@ use std::fs;
 use std::path::{Path, PathBuf};
 use std::process::Command;

-const GETH_BRANCH: &str = "merge-kiln";
+const GETH_BRANCH: &str = "merge-kiln-v2";
 const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum";

-fn main() {
+pub fn build() {
 let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into();
 let execution_clients_dir = manifest_dir.join("execution_clients");
@@ -52,11 +52,15 @@ fn build_geth(execution_clients_dir: &Path) {
 .success());

 // Build geth
-assert!(Command::new("make")
+let make_result = Command::new("make")
 .arg("geth")
 .current_dir(&repo_dir)
 .output()
-.expect("failed to make geth")
-.status
-.success());
+.expect("failed to make geth");
+if !make_result.status.success() {
+    dbg!(String::from_utf8_lossy(&make_result.stdout));
+    dbg!(String::from_utf8_lossy(&make_result.stderr));
+    panic!("make failed");
+}
 }
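For context, the error-handling pattern the hunk above adopts — capture the `Output` of a `Command`, then dump stdout/stderr before panicking when the exit status is non-zero — looks roughly like this as a standalone sketch (the helper name and the hard-coded path are illustrative, not from the commit):

```rust
use std::path::Path;
use std::process::Command;

/// Illustrative helper: run `make geth` in `repo_dir` and fail loudly,
/// surfacing the captured build logs, if the build does not succeed.
fn run_make_geth(repo_dir: &Path) {
    let make_result = Command::new("make")
        .arg("geth")
        .current_dir(repo_dir)
        .output()
        .expect("failed to make geth");

    if !make_result.status.success() {
        // Print the captured logs so CI failures are debuggable.
        dbg!(String::from_utf8_lossy(&make_result.stdout));
        dbg!(String::from_utf8_lossy(&make_result.stderr));
        panic!("make failed");
    }
}

fn main() {
    // Hypothetical path; the real build code derives it from CARGO_MANIFEST_DIR.
    run_make_geth(Path::new("execution_clients/go-ethereum"));
}
```

Compared with the old `assert!` one-liner, this keeps the build output around for inspection instead of discarding it on failure.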
@@ -9,7 +9,7 @@ use unused_port::unused_tcp_port;
 /// Defined for each EE type (e.g., Geth, Nethermind, etc).
 pub trait GenericExecutionEngine: Clone {
 fn init_datadir() -> TempDir;
-fn start_client(datadir: &TempDir, http_port: u16) -> Child;
+fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child;
 }

 /// Holds handle to a running EE process, plus some other metadata.
@@ -19,6 +19,7 @@ pub struct ExecutionEngine<E> {
 #[allow(dead_code)]
 datadir: TempDir,
 http_port: u16,
+http_auth_port: u16,
 child: Child,
 }

@@ -35,11 +36,13 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> {
 pub fn new(engine: E) -> Self {
 let datadir = E::init_datadir();
 let http_port = unused_tcp_port().unwrap();
-let child = E::start_client(&datadir, http_port);
+let http_auth_port = unused_tcp_port().unwrap();
+let child = E::start_client(&datadir, http_port, http_auth_port);
 Self {
 engine,
 datadir,
 http_port,
+http_auth_port,
 child,
 }
 }
@@ -47,6 +50,11 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> {
 pub fn http_url(&self) -> SensitiveUrl {
 SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap()
 }
+
+#[allow(dead_code)] // Future use.
+pub fn http_ath_url(&self) -> SensitiveUrl {
+SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap()
+}
 }

 /*
@@ -90,7 +98,7 @@ impl GenericExecutionEngine for Geth {
 datadir
 }

-fn start_client(datadir: &TempDir, http_port: u16) -> Child {
+fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child {
 let network_port = unused_tcp_port().unwrap();

 Command::new(Self::binary_path())
@@ -101,6 +109,8 @@ impl GenericExecutionEngine for Geth {
 .arg("engine,eth")
 .arg("--http.port")
 .arg(http_port.to_string())
+.arg("--http.authport")
+.arg(http_auth_port.to_string())
 .arg("--port")
 .arg(network_port.to_string())
 .stdout(build_stdio())
@@ -1,12 +0,0 @@
-/// This library provides integration testing between Lighthouse and other execution engines.
-///
-/// See the `tests/tests.rs` file to run tests.
-mod execution_engine;
-mod genesis_json;
-mod test_rig;
-
-pub use execution_engine::Geth;
-pub use test_rig::TestRig;
-
-/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
-const SUPPRESS_LOGS: bool = true;
28 testing/execution_engine_integration/src/main.rs (new file)
@@ -0,0 +1,28 @@
+/// This binary runs integration tests between Lighthouse and execution engines.
+///
+/// It will first attempt to build any supported integration clients, then it will run tests.
+///
+/// A return code of `0` indicates the tests succeeded.
+mod build_geth;
+mod execution_engine;
+mod genesis_json;
+mod test_rig;
+
+use execution_engine::Geth;
+use test_rig::TestRig;
+
+/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
+const SUPPRESS_LOGS: bool = false;
+
+fn main() {
+    if cfg!(windows) {
+        panic!("windows is not supported, only linux");
+    }
+
+    test_geth()
+}
+
+fn test_geth() {
+    build_geth::build();
+    TestRig::new(Geth).perform_tests_blocking();
+}
@@ -138,7 +138,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {

 let parent_hash = terminal_pow_block_hash;
 let timestamp = timestamp_now();
-let random = Hash256::zero();
+let prev_randao = Hash256::zero();
 let finalized_block_hash = ExecutionBlockHash::zero();
 let proposer_index = 0;
 let valid_payload = self
@@ -147,7 +147,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
 .get_payload::<MainnetEthSpec>(
 parent_hash,
 timestamp,
-random,
+prev_randao,
 finalized_block_hash,
 proposer_index,
 )
@@ -210,7 +210,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
 */

 let mut invalid_payload = valid_payload.clone();
-invalid_payload.random = Hash256::from_low_u64_be(42);
+invalid_payload.prev_randao = Hash256::from_low_u64_be(42);
 let status = self
 .ee_a
 .execution_layer
@@ -227,7 +227,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {

 let parent_hash = valid_payload.block_hash;
 let timestamp = valid_payload.timestamp + 1;
-let random = Hash256::zero();
+let prev_randao = Hash256::zero();
 let finalized_block_hash = ExecutionBlockHash::zero();
 let proposer_index = 0;
 let second_payload = self
@@ -236,7 +236,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
 .get_payload::<MainnetEthSpec>(
 parent_hash,
 timestamp,
-random,
+prev_randao,
 finalized_block_hash,
 proposer_index,
 )
@@ -266,7 +266,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
 let finalized_block_hash = ExecutionBlockHash::zero();
 let payload_attributes = Some(PayloadAttributes {
 timestamp: second_payload.timestamp + 1,
-random: Hash256::zero(),
+prev_randao: Hash256::zero(),
 suggested_fee_recipient: Address::zero(),
 });
 let status = self
|
@ -1,16 +0,0 @@
|
|||||||
#[cfg(not(target_family = "windows"))]
|
|
||||||
mod not_windows {
|
|
||||||
use execution_engine_integration::{Geth, TestRig};
|
|
||||||
#[test]
|
|
||||||
fn geth() {
|
|
||||||
TestRig::new(Geth).perform_tests_blocking()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(target_family = "windows")]
|
|
||||||
mod windows {
|
|
||||||
#[test]
|
|
||||||
fn all_tests_skipped_on_windows() {
|
|
||||||
//
|
|
||||||
}
|
|
||||||
}
|
|