commit b96db45090

Merge branch 'unstable' of https://github.com/sigp/lighthouse into merge-unstable-deneb-jul-14

.github/workflows/test-suite.yml (vendored): 25 lines changed
@@ -58,8 +58,8 @@ jobs:
       uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Run tests in release
       run: make test-release
   release-tests-windows:
@@ -78,9 +78,8 @@ jobs:
       run: |
        choco install python protoc visualstudio2019-workload-vctools -y
        npm config set msvs_version 2019
-    - name: Install anvil
-      # Extra feature to work around https://github.com/foundry-rs/foundry/issues/5115
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil --features ethers/ipc
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Install make
       run: choco install -y make
     - uses: KyleMayes/install-llvm-action@v1
@@ -155,8 +154,8 @@ jobs:
       uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Run tests in debug
       run: make test-debug
   state-transition-vectors-ubuntu:
@@ -211,8 +210,8 @@ jobs:
       uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Run the beacon chain sim that starts from an eth1 contract
       run: cargo run --release --bin simulator eth1-sim
   merge-transition-ubuntu:
@@ -227,8 +226,8 @@ jobs:
       uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Run the beacon chain sim and go through the merge transition
       run: cargo run --release --bin simulator eth1-sim --post-merge
   no-eth1-simulator-ubuntu:
@@ -257,8 +256,8 @@ jobs:
       uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-    - name: Install anvil
-      run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
+    - name: Install Foundry (anvil)
+      uses: foundry-rs/foundry-toolchain@v1
     - name: Run the syncing simulator
       run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
Cargo.lock (generated): 97 lines changed

@@ -257,6 +257,15 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "anvil-rpc"
+version = "0.1.0"
+source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "anyhow"
 version = "1.0.71"
@@ -506,9 +515,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

 [[package]]
 name = "axum"
-version = "0.5.17"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43"
+checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
 dependencies = [
  "async-trait",
  "axum-core",
@@ -524,22 +533,23 @@ dependencies = [
  "mime",
  "percent-encoding",
  "pin-project-lite 0.2.9",
+ "rustversion",
  "serde",
  "serde_json",
+ "serde_path_to_error",
  "serde_urlencoded",
  "sync_wrapper",
  "tokio",
  "tower",
- "tower-http",
  "tower-layer",
  "tower-service",
 ]

 [[package]]
 name = "axum-core"
-version = "0.2.9"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc"
+checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
 dependencies = [
  "async-trait",
  "bytes",
@@ -547,6 +557,7 @@ dependencies = [
  "http",
  "http-body",
  "mime",
+ "rustversion",
  "tower-layer",
  "tower-service",
 ]
@@ -605,7 +616,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
 [[package]]
 name = "beacon-api-client"
 version = "0.1.0"
-source = "git+https://github.com/ralexstokes/beacon-api-client#30679e9e25d61731cde54e14cd8a3688a39d8e5b"
+source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9"
 dependencies = [
  "ethereum-consensus",
  "http",
@@ -2605,7 +2616,7 @@ dependencies = [
 [[package]]
 name = "ethereum-consensus"
 version = "0.1.1"
-source = "git+https://github.com/ralexstokes//ethereum-consensus?rev=9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d#9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d"
+source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2"
 dependencies = [
  "async-stream",
  "blst",
@@ -2618,8 +2629,9 @@ dependencies = [
  "rand 0.8.5",
  "serde",
  "serde_json",
+ "serde_yaml",
  "sha2 0.9.9",
- "ssz-rs",
+ "ssz_rs",
  "thiserror",
  "tokio",
  "tokio-stream",
@@ -2893,7 +2905,7 @@ dependencies = [
  "serde_json",
  "slog",
  "slot_clock",
- "ssz-rs",
+ "ssz_rs",
  "ssz_types",
  "state_processing",
  "strum",
@@ -3618,12 +3630,6 @@ dependencies = [
  "pin-project-lite 0.2.9",
 ]

-[[package]]
-name = "http-range-header"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
-
 [[package]]
 name = "http_api"
 version = "0.1.0"
@@ -5104,9 +5110,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"

 [[package]]
 name = "matchit"
-version = "0.5.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb"
+checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"

 [[package]]
 name = "md-5"
@@ -5195,16 +5201,20 @@ dependencies = [

 [[package]]
 name = "mev-rs"
-version = "0.2.1"
-source = "git+https://github.com/ralexstokes//mev-rs?rev=7813d4a4a564e0754e9aaab2d95520ba437c3889#7813d4a4a564e0754e9aaab2d95520ba437c3889"
+version = "0.3.0"
+source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af"
 dependencies = [
+ "anvil-rpc",
  "async-trait",
  "axum",
  "beacon-api-client",
  "ethereum-consensus",
  "hyper",
+ "parking_lot 0.12.1",
+ "reqwest",
  "serde",
- "ssz-rs",
+ "serde_json",
+ "ssz_rs",
  "thiserror",
  "tokio",
  "tracing",
@@ -5655,6 +5665,7 @@ dependencies = [
  "execution_layer",
  "sensitive_url",
  "tempfile",
+ "tokio",
  "types",
  "validator_client",
  "validator_dir",
@@ -7492,6 +7503,16 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "serde_path_to_error"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b1b6471d7496b051e03f1958802a73f88b947866f5146f329e47e36554f4e55"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
 [[package]]
 name = "serde_repr"
 version = "0.1.12"
@@ -8001,23 +8022,24 @@ dependencies = [
 ]

 [[package]]
-name = "ssz-rs"
-version = "0.8.0"
-source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
+name = "ssz_rs"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f"
 dependencies = [
  "bitvec 1.0.1",
  "hex",
  "num-bigint",
  "serde",
  "sha2 0.9.9",
- "ssz-rs-derive",
- "thiserror",
+ "ssz_rs_derive",
 ]

 [[package]]
-name = "ssz-rs-derive"
-version = "0.8.0"
-source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28ef89da1fab316465e1#adf1a0b14cef90b9536f28ef89da1fab316465e1"
+name = "ssz_rs_derive"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -8812,25 +8834,6 @@ dependencies = [
  "tracing",
 ]

-[[package]]
-name = "tower-http"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858"
-dependencies = [
- "bitflags 1.3.2",
- "bytes",
- "futures-core",
- "futures-util",
- "http",
- "http-body",
- "http-range-header",
- "pin-project-lite 0.2.9",
- "tower",
- "tower-layer",
- "tower-service",
-]
-
 [[package]]
 name = "tower-layer"
 version = "0.3.2"
@@ -93,13 +93,6 @@ resolver = "2"
 warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
 arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }

-[patch."https://github.com/ralexstokes/mev-rs"]
-mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
-[patch."https://github.com/ralexstokes/ethereum-consensus"]
-ethereum-consensus = { git = "https://github.com/ralexstokes//ethereum-consensus", rev = "9b0ee0a8a45b968c8df5e7e64ea1c094e16f053d" }
-[patch."https://github.com/ralexstokes/ssz-rs"]
-ssz-rs = { git = "https://github.com/ralexstokes//ssz-rs", rev = "adf1a0b14cef90b9536f28ef89da1fab316465e1" }
-
 [profile.maxperf]
 inherits = "release"
 lto = "fat"
@@ -86,7 +86,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let ideal_reward = reward_numerator
             .safe_div(active_increments)?
             .safe_div(WEIGHT_DENOMINATOR)?;
-        if !state.is_in_inactivity_leak(previous_epoch, spec) {
+        if !state.is_in_inactivity_leak(previous_epoch, spec)? {
             ideal_rewards_hashmap
                 .insert((flag_index, effective_balance), (ideal_reward, penalty));
         } else {
@@ -236,6 +236,17 @@ pub struct PrePayloadAttributes {
     pub parent_block_number: u64,
 }

+/// Information about a state/block at a specific slot.
+#[derive(Debug, Clone, Copy)]
+pub struct FinalizationAndCanonicity {
+    /// True if the slot of the state or block is finalized.
+    ///
+    /// This alone DOES NOT imply that the state/block is finalized, use `self.is_finalized()`.
+    pub slot_is_finalized: bool,
+    /// True if the state or block is canonical at its slot.
+    pub canonical: bool,
+}
+
 /// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already
 /// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled.
 #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
@@ -470,6 +481,12 @@ pub struct BeaconChain<T: BeaconChainTypes> {

 type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);

+impl FinalizationAndCanonicity {
+    pub fn is_finalized(self) -> bool {
+        self.slot_is_finalized && self.canonical
+    }
+}
+
 impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Checks if a block is finalized.
     /// The finalization check is done with the block slot. The block root is used to verify that
@@ -499,16 +516,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         state_root: &Hash256,
         state_slot: Slot,
     ) -> Result<bool, Error> {
+        self.state_finalization_and_canonicity(state_root, state_slot)
+            .map(FinalizationAndCanonicity::is_finalized)
+    }
+
+    /// Fetch the finalization and canonicity status of the state with `state_root`.
+    pub fn state_finalization_and_canonicity(
+        &self,
+        state_root: &Hash256,
+        state_slot: Slot,
+    ) -> Result<FinalizationAndCanonicity, Error> {
         let finalized_slot = self
             .canonical_head
             .cached_head()
             .finalized_checkpoint()
             .epoch
             .start_slot(T::EthSpec::slots_per_epoch());
-        let is_canonical = self
+        let slot_is_finalized = state_slot <= finalized_slot;
+        let canonical = self
             .state_root_at_slot(state_slot)?
             .map_or(false, |canonical_root| state_root == &canonical_root);
-        Ok(state_slot <= finalized_slot && is_canonical)
+        Ok(FinalizationAndCanonicity {
+            slot_is_finalized,
+            canonical,
+        })
     }

     /// Persists the head tracker and fork choice.
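Note: the split status above lets callers distinguish "the slot is finalized" from "this particular state is finalized". A minimal usage sketch, assuming a `chain: BeaconChain<T>` handle and a known `state_root`/`state_slot` (all other plumbing omitted):

    let status = chain.state_finalization_and_canonicity(&state_root, state_slot)?;
    if status.is_finalized() {
        // Finalized AND canonical: this exact state is immutable history.
    } else if status.slot_is_finalized && !status.canonical {
        // The slot is finalized, but this state sits on a permanently orphaned fork.
    }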
@@ -83,6 +83,8 @@ pub struct ChainConfig {
     pub enable_backfill_rate_limiting: bool,
     /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
     pub progressive_balances_mode: ProgressiveBalancesMode,
+    /// Number of epochs between each migration of data from the hot database to the freezer.
+    pub epochs_per_migration: u64,
 }

 impl Default for ChainConfig {
@@ -114,6 +116,7 @@ impl Default for ChainConfig {
             always_prepare_payload: false,
             enable_backfill_rate_limiting: true,
             progressive_balances_mode: ProgressiveBalancesMode::Checked,
+            epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
         }
     }
 }
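Note: the new field is a plain `u64`, so callers can override it with struct update syntax; the test added later in this commit does exactly this:

    let config = ChainConfig {
        epochs_per_migration: 1024,
        ..ChainConfig::default()
    };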
@@ -81,6 +81,7 @@ pub use execution_payload::NotifyExecutionLayer;
 pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
 pub use kzg::TrustedSetup;
 pub use metrics::scrape_for_metrics;
+pub use migrate::MigratorConfig;
 pub use parking_lot;
 pub use slot_clock;
 pub use state_processing::per_block_processing::errors::{
@@ -25,10 +25,15 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200;
 /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`.
 const COMPACTION_FINALITY_DISTANCE: u64 = 1024;

+/// Default number of epochs to wait between finalization migrations.
+pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1;
+
 /// The background migrator runs a thread to perform pruning and migrate state from the hot
 /// to the cold database.
 pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     db: Arc<HotColdDB<E, Hot, Cold>>,
+    /// Record of when the last migration ran, for enforcing `epochs_per_migration`.
+    prev_migration: Arc<Mutex<PrevMigration>>,
     #[allow(clippy::type_complexity)]
     tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>,
     /// Genesis block root, for persisting the `PersistedBeaconChain`.
@@ -36,9 +41,22 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
     log: Logger,
 }

-#[derive(Debug, Default, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct MigratorConfig {
     pub blocking: bool,
+    /// Run migrations at most once per `epochs_per_migration`.
+    ///
+    /// If set to 0 or 1, then run every finalization.
+    pub epochs_per_migration: u64,
+}
+
+impl Default for MigratorConfig {
+    fn default() -> Self {
+        Self {
+            blocking: false,
+            epochs_per_migration: DEFAULT_EPOCHS_PER_MIGRATION,
+        }
+    }
 }

 impl MigratorConfig {
@@ -46,6 +64,19 @@ impl MigratorConfig {
         self.blocking = true;
         self
     }
+
+    pub fn epochs_per_migration(mut self, epochs_per_migration: u64) -> Self {
+        self.epochs_per_migration = epochs_per_migration;
+        self
+    }
+}
+
+/// Record of when the last migration ran.
+pub struct PrevMigration {
+    /// The epoch at which the last finalization migration ran.
+    epoch: Epoch,
+    /// The number of epochs to wait between runs.
+    epochs_per_migration: u64,
 }

 /// Pruning can be successful, or in rare cases deferred to a later point.
@@ -93,6 +124,7 @@ pub struct FinalizationNotification {
     finalized_state_root: BeaconStateHash,
     finalized_checkpoint: Checkpoint,
     head_tracker: Arc<HeadTracker>,
+    prev_migration: Arc<Mutex<PrevMigration>>,
     genesis_block_root: Hash256,
 }

@@ -104,6 +136,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
         genesis_block_root: Hash256,
         log: Logger,
     ) -> Self {
+        // Estimate last migration run from DB split slot.
+        let prev_migration = Arc::new(Mutex::new(PrevMigration {
+            epoch: db.get_split_slot().epoch(E::slots_per_epoch()),
+            epochs_per_migration: config.epochs_per_migration,
+        }));
         let tx_thread = if config.blocking {
             None
         } else {
@@ -112,6 +149,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
         Self {
             db,
             tx_thread,
+            prev_migration,
             genesis_block_root,
             log,
         }
@@ -132,6 +170,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
             finalized_state_root,
             finalized_checkpoint,
             head_tracker,
+            prev_migration: self.prev_migration.clone(),
             genesis_block_root: self.genesis_block_root,
         };

@@ -227,6 +266,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
         notif: FinalizationNotification,
         log: &Logger,
     ) {
+        // Do not run too frequently.
+        let epoch = notif.finalized_checkpoint.epoch;
+        let mut prev_migration = notif.prev_migration.lock();
+        if epoch < prev_migration.epoch + prev_migration.epochs_per_migration {
+            debug!(
+                log,
+                "Database consolidation deferred";
+                "last_finalized_epoch" => prev_migration.epoch,
+                "new_finalized_epoch" => epoch,
+                "epochs_per_migration" => prev_migration.epochs_per_migration,
+            );
+            return;
+        }
+
+        // Update the previous migration epoch immediately to avoid holding the lock. If the
+        // migration doesn't succeed then the next migration will be retried at the next scheduled
+        // run.
+        prev_migration.epoch = epoch;
+        drop(prev_migration);
+
         debug!(log, "Database consolidation started");

         let finalized_state_root = notif.finalized_state_root;
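Note: the deferral check in the migration path is simple epoch arithmetic: a migration runs only once the new finalized epoch has advanced by at least `epochs_per_migration` since the last run. A toy standalone model of that condition (illustrative names, plain u64 epochs):

    // Mirrors `epoch < prev_migration.epoch + prev_migration.epochs_per_migration`.
    fn should_run(last_epoch: u64, epochs_per_migration: u64, new_epoch: u64) -> bool {
        new_epoch >= last_epoch + epochs_per_migration
    }

    fn main() {
        assert!(!should_run(100, 1024, 1123)); // deferred
        assert!(should_run(100, 1024, 1124)); // runs, and the last-run epoch becomes 1124
    }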
@@ -519,6 +519,7 @@ where
         let validator_keypairs = self
             .validator_keypairs
             .expect("cannot build without validator keypairs");
+        let chain_config = self.chain_config.unwrap_or_default();
         let trusted_setup: TrustedSetup =
             serde_json::from_reader(eth2_network_config::get_trusted_setup::<E::Kzg>())
                 .map_err(|e| format!("Unable to read trusted setup file: {}", e))
@@ -528,13 +529,17 @@ where
             .logger(log.clone())
             .custom_spec(spec)
             .store(self.store.expect("cannot build without store"))
-            .store_migrator_config(MigratorConfig::default().blocking())
+            .store_migrator_config(
+                MigratorConfig::default()
+                    .blocking()
+                    .epochs_per_migration(chain_config.epochs_per_migration),
+            )
             .task_executor(self.runtime.task_executor.clone())
             .execution_layer(self.execution_layer)
             .dummy_eth1_backend()
             .expect("should build dummy backend")
             .shutdown_sender(shutdown_tx)
-            .chain_config(self.chain_config.unwrap_or_default())
+            .chain_config(chain_config)
             .event_handler(Some(ServerSentEventHandler::new_with_capacity(
                 log.clone(),
                 5,
@@ -12,7 +12,7 @@ use beacon_chain::{
     slot_clock::{SlotClock, SystemTimeSlotClock},
     state_advance_timer::spawn_state_advance_timer,
     store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
-    BeaconChain, BeaconChainTypes, Eth1ChainBackend, ServerSentEventHandler,
+    BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler,
 };
 use beacon_processor::{
     work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessor, BeaconProcessorSend,
@@ -188,6 +188,9 @@ where
             .store(store)
             .task_executor(context.executor.clone())
             .custom_spec(spec.clone())
+            .store_migrator_config(
+                MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration),
+            )
             .chain_config(chain_config)
             .graffiti(graffiti)
             .event_handler(event_handler)
@@ -42,9 +42,9 @@ lazy_static = "1.4.0"
 ethers-core = "1.0.2"
 builder_client = { path = "../builder_client" }
 fork_choice = { path = "../../consensus/fork_choice" }
-mev-rs = { git = "https://github.com/ralexstokes/mev-rs" }
-ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" }
-ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs" }
+mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" }
+ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" }
+ssz_rs = "0.9.0"
 tokio-stream = { version = "0.1.9", features = [ "sync" ] }
 strum = "0.24.0"
 keccak-hash = "0.10.0"
@@ -237,6 +237,7 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
             BlockProposalContents::Payload {
                 payload: Payload::default_at_fork(fork_name)?,
                 block_value: Uint256::zero(),
+                _phantom: PhantomData,
             }
         }
         ForkName::Deneb => BlockProposalContents::PayloadAndBlobs {
@@ -938,6 +939,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
                         BlockProposalContents::Payload {
                             payload: relay.data.message.header,
                             block_value: relay.data.message.value,
+                            _phantom: PhantomData,
                         },
                     )),
                     Err(reason) if !reason.payload_invalid() => {
@@ -992,6 +994,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
                         BlockProposalContents::Payload {
                             payload: relay.data.message.header,
                             block_value: relay.data.message.value,
+                            _phantom: PhantomData,
                         },
                     )),
                     // If the payload is valid then use it. The local EE failed
@@ -1000,6 +1003,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
                         BlockProposalContents::Payload {
                             payload: relay.data.message.header,
                             block_value: relay.data.message.value,
+                            _phantom: PhantomData,
                         },
                     )),
                     Err(reason) => {
@@ -1178,8 +1182,9 @@ impl<T: EthSpec> ExecutionLayer<T> {
                     "parent_hash" => ?parent_hash,
                 );
                 engine.api.get_payload::<T>(current_fork, payload_id).await
-            }.await?;
+            };
+            let payload_response = payload_fut.await;
+            let (execution_payload, block_value) = payload_response.map(|payload_response| {
             if payload_response.execution_payload_ref().fee_recipient() != payload_attributes.suggested_fee_recipient() {
                 error!(
                     self.log(),
@@ -1200,8 +1205,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
                     attempts."
                 );
             }
-            Ok(payload_response.into())
+            payload_response.into()
+            })?;
+            Ok(BlockProposalContents::Payload {
+                payload: execution_payload.into(),
+                block_value,
+                _phantom: PhantomData,
+            })
         })
         .await
         .map_err(Box::new)
@@ -2126,6 +2136,22 @@ async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T,
     (result, duration)
 }

+fn noop<T: EthSpec>(
+    _: &ExecutionLayer<T>,
+    _: ExecutionPayloadRef<T>,
+) -> Option<ExecutionPayload<T>> {
+    None
+}
+
+#[cfg(test)]
+/// Returns the duration since the unix epoch.
+fn timestamp_now() -> u64 {
+    SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_else(|_| Duration::from_secs(0))
+        .as_secs()
+}
+
 #[derive(Debug)]
 pub enum BlobTxConversionError {
     /// The transaction type was not set.
@@ -2356,12 +2382,3 @@ mod test {
         .await;
     }
 }
-
-#[cfg(test)]
-/// Returns the duration since the unix epoch.
-fn timestamp_now() -> u64 {
-    SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .unwrap_or_else(|_| Duration::from_secs(0))
-        .as_secs()
-}
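Note: the `_phantom: PhantomData` fields added above let the struct carry a type parameter without storing a value of that type. A generic standalone illustration (not the actual `BlockProposalContents` definition):

    use std::marker::PhantomData;

    // A struct tagged with a type it never stores; PhantomData is zero-sized.
    struct Tagged<P> {
        value: u64,
        _phantom: PhantomData<P>,
    }

    fn main() {
        struct FullPayload;
        let tagged: Tagged<FullPayload> = Tagged { value: 1, _phantom: PhantomData };
        println!("{}", tagged.value);
    }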
@@ -11,11 +11,17 @@ use ethereum_consensus::{
 };
 use fork_choice::ForkchoiceUpdateParameters;
 use mev_rs::{
-    bellatrix::{BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix},
-    capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella},
-    sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError,
-    BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload,
-    SignedBlindedBeaconBlock, SignedBuilderBid, SignedValidatorRegistration,
+    blinded_block_provider::Server as BlindedBlockProviderServer,
+    signing::{sign_builder_message, verify_signed_builder_message},
+    types::{
+        bellatrix::{
+            BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix,
+        },
+        capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella},
+        BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock,
+        SignedBuilderBid, SignedValidatorRegistration,
+    },
+    Error as MevError,
 };
 use parking_lot::RwLock;
 use sensitive_url::SensitiveUrl;
@@ -47,7 +53,7 @@ pub enum Operation {
 }

 impl Operation {
-    fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
+    fn apply<B: BidStuff>(self, bid: &mut B) -> Result<(), MevError> {
         match self {
             Operation::FeeRecipient(fee_recipient) => {
                 *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)?
@@ -73,7 +79,7 @@ pub trait BidStuff {
     fn prev_randao_mut(&mut self) -> &mut Hash32;
     fn block_number_mut(&mut self) -> &mut u64;
     fn timestamp_mut(&mut self) -> &mut u64;
-    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError>;
+    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>;

     fn sign_builder_message(
         &mut self,
@@ -134,11 +140,9 @@ impl BidStuff for BuilderBid {
         }
     }

-    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, BlindedBlockProviderError> {
+    fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> {
         match self {
-            Self::Bellatrix(_) => Err(BlindedBlockProviderError::Custom(
-                "withdrawals_root called on bellatrix bid".to_string(),
-            )),
+            Self::Bellatrix(_) => Err(MevError::InvalidFork),
             Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root),
         }
     }
@@ -274,7 +278,7 @@ impl<E: EthSpec> MockBuilder<E> {
         *self.invalidate_signatures.write() = false;
     }

-    fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), BlindedBlockProviderError> {
+    fn apply_operations<B: BidStuff>(&self, bid: &mut B) -> Result<(), MevError> {
         let mut guard = self.operations.write();
         while let Some(op) = guard.pop() {
             op.apply(bid)?;
@@ -288,7 +292,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
     async fn register_validators(
         &self,
         registrations: &mut [SignedValidatorRegistration],
-    ) -> Result<(), BlindedBlockProviderError> {
+    ) -> Result<(), MevError> {
         for registration in registrations {
             let pubkey = registration.message.public_key.clone();
             let message = &mut registration.message;
@@ -307,10 +311,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
         Ok(())
     }

-    async fn fetch_best_bid(
-        &self,
-        bid_request: &BidRequest,
-    ) -> Result<SignedBuilderBid, BlindedBlockProviderError> {
+    async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result<SignedBuilderBid, MevError> {
         let slot = Slot::new(bid_request.slot);
         let fork = self.spec.fork_name_at_slot::<E>(slot);
         let signed_cached_data = self
@@ -336,7 +337,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             .map_err(convert_err)?
             .block_hash();
         if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? {
-            return Err(BlindedBlockProviderError::Custom(format!(
+            return Err(custom_err(format!(
                 "head mismatch: {} {}",
                 head_execution_hash, bid_request.parent_hash
             )));
@@ -396,7 +397,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
             .get_debug_beacon_states(StateId::Head)
             .await
             .map_err(convert_err)?
-            .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))?
+            .ok_or_else(|| custom_err("missing head state".to_string()))?
             .data;
         let prev_randao = head_state
             .get_randao_mix(head_state.current_epoch())
@@ -409,10 +410,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
                 PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![]))
             }
             ForkName::Base | ForkName::Altair => {
-                return Err(BlindedBlockProviderError::Custom(format!(
-                    "Unsupported fork: {}",
-                    fork
-                )));
+                return Err(MevError::InvalidFork);
             }
         };

@@ -452,12 +450,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
                 value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?,
                 public_key: self.builder_sk.public_key(),
             }),
-            ForkName::Base | ForkName::Altair | ForkName::Deneb => {
-                return Err(BlindedBlockProviderError::Custom(format!(
-                    "Unsupported fork: {}",
-                    fork
-                )))
-            }
+            ForkName::Base | ForkName::Altair | ForkName::Deneb => return Err(MevError::InvalidFork),
         };
         *message.gas_limit_mut() = cached_data.gas_limit;

@@ -475,7 +468,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
     async fn open_bid(
         &self,
         signed_block: &mut SignedBlindedBeaconBlock,
-    ) -> Result<ServerPayload, BlindedBlockProviderError> {
+    ) -> Result<ServerPayload, MevError> {
         let node = match signed_block {
             SignedBlindedBeaconBlock::Bellatrix(block) => {
                 block.message.body.execution_payload_header.hash_tree_root()
@@ -496,9 +489,7 @@ impl<E: EthSpec> mev_rs::BlindedBlockProvider for MockBuilder<E> {
         }
     }

-pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(
-    ssz_rs_data: &T,
-) -> Result<U, BlindedBlockProviderError> {
+pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(ssz_rs_data: &T) -> Result<U, MevError> {
     U::from_ssz_bytes(
         ssz_rs::serialize(ssz_rs_data)
             .map_err(convert_err)?
@@ -507,12 +498,17 @@ pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>(
         .map_err(convert_err)
 }

-pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(
-    ssz_data: &T,
-) -> Result<U, BlindedBlockProviderError> {
+pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>(ssz_data: &T) -> Result<U, MevError> {
     ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err)
 }

-fn convert_err<E: Debug>(e: E) -> BlindedBlockProviderError {
-    BlindedBlockProviderError::Custom(format!("{e:?}"))
+fn convert_err<E: Debug>(e: E) -> MevError {
+    custom_err(format!("{e:?}"))
+}
+
+// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`.
+fn custom_err(s: String) -> MevError {
+    MevError::Consensus(ethereum_consensus::state_transition::Error::Io(
+        std::io::Error::new(std::io::ErrorKind::Other, s),
+    ))
 }
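Note: `custom_err` above works around the removal of a `Custom` error variant by smuggling the message through an `io::Error`. A self-contained model of the same trick (illustrative `Error` enum, not the real `mev_rs::Error`):

    #[derive(Debug)]
    enum Error {
        Io(std::io::Error),
    }

    fn custom_err(s: String) -> Error {
        Error::Io(std::io::Error::new(std::io::ErrorKind::Other, s))
    }

    fn main() {
        let err = custom_err("head mismatch".to_string());
        println!("{err:?}"); // the message survives inside the io::Error
    }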
@@ -75,7 +75,7 @@ impl<T: EthSpec> PackingEfficiencyHandler<T> {
             available_attestations: HashSet::new(),
             included_attestations: HashMap::new(),
             committee_store: CommitteeStore::new(),
-            _phantom: PhantomData::default(),
+            _phantom: PhantomData,
         };

         handler.compute_epoch(start_epoch, &starting_state, spec)?;
@@ -70,15 +70,32 @@ impl StateId {
                 .map_err(BeaconChainError::DBError)
                 .map_err(warp_utils::reject::beacon_chain_error)?
             {
-                let execution_optimistic = chain
+                let finalization_status = chain
+                    .state_finalization_and_canonicity(root, hot_summary.slot)
+                    .map_err(warp_utils::reject::beacon_chain_error)?;
+                let finalized = finalization_status.is_finalized();
+                let fork_choice = chain.canonical_head.fork_choice_read_lock();
+                let execution_optimistic = if finalization_status.slot_is_finalized
+                    && !finalization_status.canonical
+                {
+                    // This block is permanently orphaned and has likely been pruned from fork
+                    // choice. If it isn't found in fork choice, mark it optimistic to be on the
+                    // safe side.
+                    fork_choice
+                        .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root)
+                        .unwrap_or(true)
+                } else {
+                    // This block is either old and finalized, or recent and unfinalized, so
+                    // it's safe to fallback to the optimistic status of the finalized block.
+                    chain
                     .canonical_head
                     .fork_choice_read_lock()
-                    .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root)
+                    .is_optimistic_or_invalid_block(&hot_summary.latest_block_root)
                     .map_err(BeaconChainError::ForkChoiceError)
-                    .map_err(warp_utils::reject::beacon_chain_error)?;
-                let finalized = chain
-                    .is_finalized_state(root, hot_summary.slot)
-                    .map_err(warp_utils::reject::beacon_chain_error)?;
+                    .map_err(warp_utils::reject::beacon_chain_error)?
+                };
                 return Ok((*root, execution_optimistic, finalized));
             } else if let Some(_cold_state_slot) = chain
                 .store
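Note: the branch above encodes a small decision table. A condensed model of it (plain bools instead of the real fork choice and chain types; `None` stands for a block pruned from fork choice):

    fn execution_optimistic(
        slot_is_finalized: bool,
        canonical: bool,
        fork_choice_status: Option<bool>, // None: block pruned from fork choice
        finalized_block_optimistic: bool,
    ) -> bool {
        if slot_is_finalized && !canonical {
            // Permanently orphaned: if fork choice no longer knows the block,
            // report optimistic to be on the safe side.
            fork_choice_status.unwrap_or(true)
        } else {
            // Old-and-finalized or recent-and-unfinalized: fall back to the
            // finalized block's status.
            fork_choice_status.unwrap_or(finalized_block_optimistic)
        }
    }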
@@ -2,8 +2,9 @@
 use beacon_chain::{
     chain_config::{DisallowedReOrgOffsets, ReOrgThreshold},
     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
+    ChainConfig,
 };
-use eth2::types::DepositContractData;
+use eth2::types::{DepositContractData, StateId};
 use execution_layer::{ForkchoiceState, PayloadAttributes};
 use http_api::test_utils::InteractiveTester;
 use parking_lot::Mutex;
@@ -17,7 +18,7 @@ use std::time::Duration;
 use tree_hash::TreeHash;
 use types::{
     Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload,
-    MainnetEthSpec, ProposerPreparationData, Slot,
+    MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot,
 };

 type E = MainnetEthSpec;
@@ -48,6 +49,76 @@ async fn deposit_contract_custom_network() {
     assert_eq!(result, expected);
 }

+// Test that state lookups by root function correctly for states that are finalized but still
+// present in the hot database, and have had their block pruned from fork choice.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn state_by_root_pruned_from_fork_choice() {
+    type E = MinimalEthSpec;
+
+    let validator_count = 24;
+    let spec = ForkName::latest().make_genesis_spec(E::default_spec());
+
+    let tester = InteractiveTester::<E>::new_with_initializer_and_mutator(
+        Some(spec.clone()),
+        validator_count,
+        Some(Box::new(move |builder| {
+            builder
+                .deterministic_keypairs(validator_count)
+                .fresh_ephemeral_store()
+                .chain_config(ChainConfig {
+                    epochs_per_migration: 1024,
+                    ..ChainConfig::default()
+                })
+        })),
+        None,
+    )
+    .await;
+
+    let client = &tester.client;
+    let harness = &tester.harness;
+
+    // Create some chain depth and finalize beyond fork choice's pruning depth.
+    let num_epochs = 8_u64;
+    let num_initial = num_epochs * E::slots_per_epoch();
+    harness.advance_slot();
+    harness
+        .extend_chain_with_sync(
+            num_initial as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+            SyncCommitteeStrategy::NoValidators,
+        )
+        .await;
+
+    // Should now be finalized.
+    let finalized_epoch = harness.finalized_checkpoint().epoch;
+    assert_eq!(finalized_epoch, num_epochs - 2);
+
+    // The split slot should still be at 0.
+    assert_eq!(harness.chain.store.get_split_slot(), 0);
+
+    // States that are between the split and the finalized slot should be able to be looked up by
+    // state root.
+    for slot in 0..finalized_epoch.start_slot(E::slots_per_epoch()).as_u64() {
+        let state_root = harness
+            .chain
+            .state_root_at_slot(Slot::new(slot))
+            .unwrap()
+            .unwrap();
+        let response = client
+            .get_debug_beacon_states::<E>(StateId::Root(state_root))
+            .await
+            .unwrap()
+            .unwrap();
+
+        assert!(response.finalized.unwrap());
+        assert!(!response.execution_optimistic.unwrap());
+
+        let mut state = response.data;
+        assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
+    }
+}
+
 /// Data structure for tracking fork choice updates received by the mock execution layer.
 #[derive(Debug, Default)]
 struct ForkChoiceUpdates {
@ -1,7 +1,10 @@
|
|||||||
//! ENR extension trait to support libp2p integration.
|
//! ENR extension trait to support libp2p integration.
|
||||||
use crate::{Enr, Multiaddr, PeerId};
|
use crate::{Enr, Multiaddr, PeerId};
|
||||||
use discv5::enr::{CombinedKey, CombinedPublicKey};
|
use discv5::enr::{CombinedKey, CombinedPublicKey};
|
||||||
use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol};
|
use libp2p::{
|
||||||
|
core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol},
|
||||||
|
identity::secp256k1,
|
||||||
|
};
|
||||||
use tiny_keccak::{Hasher, Keccak};
|
use tiny_keccak::{Hasher, Keccak};
|
||||||
|
|
||||||
/// Extend ENR for libp2p types.
|
/// Extend ENR for libp2p types.
|
||||||
@ -36,6 +39,8 @@ pub trait CombinedKeyPublicExt {
|
|||||||
pub trait CombinedKeyExt {
|
pub trait CombinedKeyExt {
|
||||||
/// Converts a libp2p key into an ENR combined key.
|
/// Converts a libp2p key into an ENR combined key.
|
||||||
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
|
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
|
||||||
|
/// Converts a [`secp256k1::Keypair`] into and Enr [`CombinedKey`].
|
||||||
|
fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EnrExt for Enr {
|
impl EnrExt for Enr {
|
||||||
@ -220,12 +225,7 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
 impl CombinedKeyExt for CombinedKey {
     fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
         match key {
-            Keypair::Secp256k1(key) => {
-                let secret =
-                    discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
-                        .expect("libp2p key must be valid");
-                Ok(CombinedKey::Secp256k1(secret))
-            }
+            Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)),
             Keypair::Ed25519(key) => {
                 let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
                     &(key.encode()[..32])
@ -237,6 +237,11 @@ impl CombinedKeyExt for CombinedKey {
             Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
         }
     }
+
+    fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
+        let secret = discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
+            .expect("libp2p key must be valid");
+        CombinedKey::Secp256k1(secret)
+    }
 }

 // helper function to convert a peer_id to a node_id. This is only possible for secp256k1/ed25519 libp2p
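The net effect of the hunks above is that the secp256k1 → ENR-key conversion becomes infallible and reusable, with `from_libp2p` delegating to it. A minimal usage sketch under the imports shown above (illustrative, not part of the diff; assumes the `CombinedKeyExt` trait is in scope):

    use discv5::enr::CombinedKey;
    use libp2p::identity::secp256k1;

    // Generate a fresh libp2p secp256k1 keypair and convert it directly.
    // No Result, no unwrap: the conversion cannot fail for this key type.
    let keypair = secp256k1::Keypair::generate();
    let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);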
@ -1101,6 +1101,7 @@ mod tests {
     use super::*;
     use crate::rpc::methods::{MetaData, MetaDataV2};
     use enr::EnrBuilder;
+    use libp2p::identity::secp256k1;
     use slog::{o, Drain};
     use types::{BitVector, MinimalEthSpec, SubnetId};
@ -1119,10 +1120,10 @@ mod tests {
     }

     async fn build_discovery() -> Discovery<E> {
-        let keypair = libp2p::identity::Keypair::generate_secp256k1();
+        let keypair = secp256k1::Keypair::generate();
         let mut config = NetworkConfig::default();
         config.set_listening_addr(crate::ListenAddress::unused_v4_ports());
-        let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap();
+        let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);
         let enr: Enr = build_enr::<E>(&enr_key, &config, &EnrForkId::default()).unwrap();
         let log = build_log(slog::Level::Debug, false);
         let globals = NetworkGlobals::new(
@ -1138,6 +1139,7 @@ mod tests {
             false,
             &log,
         );
+        let keypair = Keypair::Secp256k1(keypair);
         Discovery::new(&keypair, &config, Arc::new(globals), &log)
             .await
             .unwrap()
@ -1184,8 +1186,8 @@ mod tests {

     fn make_enr(subnet_ids: Vec<usize>) -> Enr {
         let mut builder = EnrBuilder::new("v4");
-        let keypair = libp2p::identity::Keypair::generate_secp256k1();
-        let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap();
+        let keypair = secp256k1::Keypair::generate();
+        let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair);

         // set the "attnets" field on our ENR
         let mut bitfield = BitVector::<ssz_types::typenum::U64>::new();
@ -134,9 +134,8 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
         log: &slog::Logger,
     ) -> NetworkGlobals<TSpec> {
         use crate::CombinedKeyExt;
-        let keypair = libp2p::identity::Keypair::generate_secp256k1();
-        let enr_key: discv5::enr::CombinedKey =
-            discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap();
+        let keypair = libp2p::identity::secp256k1::Keypair::generate();
+        let enr_key: discv5::enr::CombinedKey = discv5::enr::CombinedKey::from_secp256k1(&keypair);
         let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap();
         NetworkGlobals::new(
             enr,
@ -540,6 +540,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 [default: 8192 (mainnet) or 64 (minimal)]")
             .takes_value(true)
         )
+        .arg(
+            Arg::with_name("epochs-per-migration")
+                .long("epochs-per-migration")
+                .value_name("N")
+                .help("The number of epochs to wait between running the migration of data from the \
+                    hot DB to the cold DB. Less frequent runs can be useful for minimizing disk \
+                    writes")
+                .default_value("1")
+                .takes_value(true)
+        )
         .arg(
             Arg::with_name("block-cache-size")
                 .long("block-cache-size")
@ -421,6 +421,12 @@ pub fn get_config<E: EthSpec>(
         client_config.store.prune_payloads = prune_payloads;
     }

+    if let Some(epochs_per_migration) =
+        clap_utils::parse_optional(cli_args, "epochs-per-migration")?
+    {
+        client_config.chain.epochs_per_migration = epochs_per_migration;
+    }
+
     if let Some(prune_blobs) = clap_utils::parse_optional(cli_args, "prune-blobs")? {
         client_config.store.prune_blobs = prune_blobs;
     }
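For illustration, the new flag would be passed like any other store option (a hypothetical invocation; only the flag itself comes from this diff):

    # Run the hot-to-cold DB migration every 4 epochs instead of every epoch.
    lighthouse beacon_node --epochs-per-migration 4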
@ -2,7 +2,7 @@
 #![cfg_attr(
     not(test),
     deny(
-        clippy::integer_arithmetic,
+        clippy::arithmetic_side_effects,
         clippy::disallowed_methods,
         clippy::indexing_slicing,
         clippy::unwrap_used,
@ -1,4 +1,4 @@
|
|||||||
#![allow(clippy::integer_arithmetic)]
|
#![allow(clippy::arithmetic_side_effects)]
|
||||||
|
|
||||||
use super::signature_sets::{Error as SignatureSetError, *};
|
use super::signature_sets::{Error as SignatureSetError, *};
|
||||||
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
|
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
|
||||||
|
@ -960,7 +960,7 @@ async fn fork_spanning_exit() {
     spec.bellatrix_fork_epoch = Some(Epoch::new(4));
     spec.shard_committee_period = 0;

-    let harness = BeaconChainHarness::builder(MainnetEthSpec::default())
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec.clone())
         .deterministic_keypairs(VALIDATOR_COUNT)
         .mock_execution_layer()
@ -34,7 +34,7 @@ pub fn process_inactivity_updates<T: EthSpec>(
                 .safe_add_assign(spec.inactivity_score_bias)?;
         }
         // Decrease the score of all validators for forgiveness when not during a leak
-        if !state.is_in_inactivity_leak(previous_epoch, spec) {
+        if !state.is_in_inactivity_leak(previous_epoch, spec)? {
             let inactivity_score = state.get_inactivity_score_mut(index)?;
             inactivity_score
                 .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?;
@ -77,7 +77,7 @@ pub fn get_flag_index_deltas<T: EthSpec>(
         let mut delta = Delta::default();

         if unslashed_participating_indices.contains(index)? {
-            if !state.is_in_inactivity_leak(previous_epoch, spec) {
+            if !state.is_in_inactivity_leak(previous_epoch, spec)? {
                 let reward_numerator = base_reward
                     .safe_mul(weight)?
                     .safe_mul(unslashed_participating_increments)?;
@ -138,7 +138,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit {
         Ok(SigVerifiedOp::new(self, state))
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         smallvec![self.message.epoch]
     }
@ -156,7 +156,7 @@ impl<E: EthSpec> VerifyOperation<E> for AttesterSlashing<E> {
         Ok(SigVerifiedOp::new(self, state))
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         smallvec![
             self.attestation_1.data.target.epoch,
@ -177,7 +177,7 @@ impl<E: EthSpec> VerifyOperation<E> for ProposerSlashing {
         Ok(SigVerifiedOp::new(self, state))
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         // Only need a single epoch because the slots of the two headers must be equal.
         smallvec![self
@ -200,7 +200,7 @@ impl<E: EthSpec> VerifyOperation<E> for SignedBlsToExecutionChange {
         Ok(SigVerifiedOp::new(self, state))
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> {
         smallvec![]
     }
@ -455,7 +455,7 @@ impl<T: EthSpec> BeaconState<T> {
     }

     /// Specialised deserialisation method that uses the `ChainSpec` as context.
-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
         // Slot is after genesis_time (u64) and genesis_validators_root (Hash256).
         let slot_start = <u64 as Decode>::ssz_fixed_len() + <Hash256 as Decode>::ssz_fixed_len();
@ -1761,16 +1761,22 @@ impl<T: EthSpec> BeaconState<T> {
         previous_epoch: Epoch,
         val_index: usize,
     ) -> Result<bool, Error> {
-        self.get_validator(val_index).map(|val| {
-            val.is_active_at(previous_epoch)
-                || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch)
-        })
+        let val = self.get_validator(val_index)?;
+        Ok(val.is_active_at(previous_epoch)
+            || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch))
     }

     /// Passing `previous_epoch` to this function rather than computing it internally provides
     /// a tangible speed improvement in state processing.
-    pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool {
-        (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty
+    pub fn is_in_inactivity_leak(
+        &self,
+        previous_epoch: Epoch,
+        spec: &ChainSpec,
+    ) -> Result<bool, safe_arith::ArithError> {
+        Ok(
+            (previous_epoch.safe_sub(self.finalized_checkpoint().epoch)?)
+                > spec.min_epochs_to_inactivity_penalty,
+        )
     }

     /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees
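The signature change above is the interesting part: the bare `-` in the old `is_in_inactivity_leak` could underflow if `previous_epoch` ever trailed the finalized epoch, so the arithmetic now goes through `safe_arith` and the `Result` is propagated with `?` at the call sites shown earlier. A small sketch of the behaviour (epoch values are made up; assumes the `safe_arith::SafeArith` trait used throughout these hunks):

    use safe_arith::SafeArith;
    use types::Epoch;

    let previous_epoch = Epoch::new(10);
    let finalized_epoch = Epoch::new(5);
    // 10 - 5 = 5 epochs since finality; with min_epochs_to_inactivity_penalty = 4
    // this state would be considered to be in an inactivity leak.
    assert_eq!(previous_epoch.safe_sub(finalized_epoch).unwrap(), Epoch::new(5));
    // The reversed subtraction underflows and surfaces as an Err instead of a panic.
    assert!(finalized_epoch.safe_sub(previous_epoch).is_err());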
@ -1,4 +1,4 @@
-#![allow(clippy::integer_arithmetic)]
+#![allow(clippy::arithmetic_side_effects)]

 use super::BeaconState;
 use crate::*;
@ -1,4 +1,4 @@
-#![allow(clippy::integer_arithmetic)]
+#![allow(clippy::arithmetic_side_effects)]
 #![allow(clippy::disallowed_methods)]
 #![allow(clippy::indexing_slicing)]
@ -468,7 +468,7 @@ impl ChainSpec {
         Hash256::from(domain)
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     pub const fn attestation_subnet_prefix_bits(&self) -> u32 {
         let attestation_subnet_count_bits = self.attestation_subnet_count.ilog2();
         self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits
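For intuition about the function above (the values here are assumed for illustration, not taken from the diff): `ilog2` is the floor base-2 logarithm, so a subnet count of 64 contributes exactly 6 prefix bits.

    // u64::ilog2 rounds down to the nearest power of two (stable since Rust 1.67).
    let attestation_subnet_count: u64 = 64;
    let attestation_subnet_extra_bits: u8 = 0;
    let prefix_bits = attestation_subnet_extra_bits as u32 + attestation_subnet_count.ilog2();
    assert_eq!(prefix_bits, 6);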
@ -115,7 +115,7 @@ impl<T: EthSpec> ExecutionPayload<T> {
         }
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     /// Returns the maximum size of an execution payload.
     pub fn max_execution_payload_merge_size() -> usize {
         // Fixed part
@ -126,7 +126,7 @@ impl<T: EthSpec> ExecutionPayload<T> {
             + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction()))
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     /// Returns the maximum size of an execution payload.
     pub fn max_execution_payload_capella_size() -> usize {
         // Fixed part
@ -3,7 +3,7 @@
 #![cfg_attr(
     not(test),
     deny(
-        clippy::integer_arithmetic,
+        clippy::arithmetic_side_effects,
         clippy::disallowed_methods,
         clippy::indexing_slicing
     )
@ -1,4 +1,4 @@
-#![allow(clippy::integer_arithmetic)]
+#![allow(clippy::arithmetic_side_effects)]

 use crate::{Hash256, ParticipationFlags, Unsigned, VariableList};
 use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache};
@ -72,7 +72,7 @@ impl SubnetId {
         .into())
     }

-    #[allow(clippy::integer_arithmetic)]
+    #[allow(clippy::arithmetic_side_effects)]
     /// Computes the set of subnets the node should be subscribed to during the current epoch,
     /// along with the first epoch in which these subscriptions are no longer valid.
     pub fn compute_subnets_for_epoch<T: EthSpec>(
@ -1,4 +1,4 @@
-#![allow(clippy::integer_arithmetic)]
+#![allow(clippy::arithmetic_side_effects)]

 use std::fmt::Debug;
@ -30,7 +30,7 @@ pub trait TestRandom {

 impl<T> TestRandom for PhantomData<T> {
     fn random_for_test(_rng: &mut impl RngCore) -> Self {
-        PhantomData::default()
+        PhantomData
     }
 }
@ -1,6 +1,7 @@
 use clap::ArgMatches;
 use lighthouse_network::{
-    discovery::{build_enr, CombinedKey, CombinedKeyExt, Keypair, ENR_FILENAME},
+    discovery::{build_enr, CombinedKey, CombinedKeyExt, ENR_FILENAME},
+    libp2p::identity::secp256k1,
     NetworkConfig, NETWORK_KEY_FILENAME,
 };
 use std::fs::File;
@ -29,8 +30,8 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
     config.enr_udp4_port = Some(udp_port);
     config.enr_tcp6_port = Some(tcp_port);

-    let local_keypair = Keypair::generate_secp256k1();
-    let enr_key = CombinedKey::from_libp2p(&local_keypair)?;
+    let secp256k1_keypair = secp256k1::Keypair::generate();
+    let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair);
     let enr_fork_id = EnrForkId {
         fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()),
         next_fork_version: genesis_fork_version,
@ -47,13 +48,10 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         .write_all(enr.to_base64().as_bytes())
         .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?;

-    let secret_bytes = match local_keypair {
-        Keypair::Secp256k1(key) => key.secret().to_bytes(),
-        _ => return Err("Key is not a secp256k1 key".into()),
-    };
-
     let mut key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME))
         .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?;

+    let secret_bytes = secp256k1_keypair.secret().to_bytes();
     key_file
         .write_all(&secret_bytes)
         .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?;
@ -40,7 +40,6 @@ fn main() {
                 .long("spec")
                 .value_name("STRING")
                 .takes_value(true)
-                .required(true)
                 .possible_values(&["minimal", "mainnet", "gnosis"])
                 .default_value("mainnet")
                 .global(true),
@ -372,7 +371,6 @@ fn main() {
                 .index(2)
                 .value_name("BIP39_MNENMONIC")
                 .takes_value(true)
-                .required(true)
                 .default_value(
                     "replace nephew blur decorate waste convince soup column \
                     orient excite play baby",
@ -393,7 +391,6 @@ fn main() {
                 .help("The block hash used when generating an execution payload. This \
                     value is used for `execution_payload_header.block_hash` as well as \
                     `execution_payload_header.random`")
-                .required(true)
                 .default_value(
                     "0x0000000000000000000000000000000000000000000000000000000000000000",
                 ),
@ -411,7 +408,6 @@ fn main() {
                 .value_name("INTEGER")
                 .takes_value(true)
                 .help("The base fee per gas field in the execution payload generated.")
-                .required(true)
                 .default_value("1000000000"),
         )
         .arg(
@ -420,7 +416,6 @@ fn main() {
                 .value_name("INTEGER")
                 .takes_value(true)
                 .help("The gas limit field in the execution payload generated.")
-                .required(true)
                 .default_value("30000000"),
         )
         .arg(
@ -1772,6 +1772,24 @@ fn no_reconstruct_historic_states_flag() {
         .run_with_zero_port()
         .with_config(|config| assert!(!config.chain.reconstruct_historic_states));
 }
+
+#[test]
+fn epochs_per_migration_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.epochs_per_migration,
+                beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_MIGRATION
+            )
+        });
+}
+
+#[test]
+fn epochs_per_migration_override() {
+    CommandLineTest::new()
+        .flag("epochs-per-migration", Some("128"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.chain.epochs_per_migration, 128));
+}

 // Tests for Slasher flags.
 // Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342
@ -9,5 +9,5 @@ set -Eeuo pipefail
 source ./vars.env

 if [ -d $DATADIR ]; then
-    rm -r $DATADIR
+    rm -rf $DATADIR
 fi
@ -1,4 +1,4 @@
-#![cfg(all(feature = "lmdb"))]
+#![cfg(feature = "lmdb")]

 use slasher::{config::MDBX_DATA_FILENAME, Config, DatabaseBackend, DatabaseBackendOverride};
 use std::fs::File;
@ -13,7 +13,7 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST))
 BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME)
 BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG)

-WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget)
+CURL := $(if $(LIGHTHOUSE_GITHUB_TOKEN),curl -L --header "Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",curl -L)

 all:
 	make $(OUTPUT_DIR)
@ -27,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS)

 $(BLS_OUTPUT_DIR):
 	mkdir $(BLS_OUTPUT_DIR)
-	$(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL)
+	$(CURL) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -o $(BLS_TARBALL)
 	tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR)

 %-$(TESTS_TAG).tar.gz:
-	$(WGET) $(BASE_URL)/$*.tar.gz -O $@
+	$(CURL) $(BASE_URL)/$*.tar.gz -o $@

 clean-test-files:
 	rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR)
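A quick gloss on the flag swap (illustrative shell, with $TOKEN and $URL standing in for the Makefile variables): wget's uppercase `-O` names the output file, while curl needs `-L` to follow the redirects GitHub serves for release assets and lowercase `-o` for the output path.

    # Old: wget writes the download to the file named by -O.
    wget --header="Authorization: $TOKEN" "$URL/general.tar.gz" -O general.tar.gz
    # New: curl follows redirects with -L and writes to the file named by -o.
    curl -L --header "Authorization: $TOKEN" "$URL/general.tar.gz" -o general.tar.gz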
@ -14,3 +14,4 @@ validator_client = { path = "../../validator_client" }
 validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] }
 sensitive_url = { path = "../../common/sensitive_url" }
 execution_layer = { path = "../../beacon_node/execution_layer" }
+tokio = { version = "1.14.0", features = ["time"] }
@ -10,6 +10,7 @@ use std::path::PathBuf;
 use std::time::Duration;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tempfile::{Builder as TempBuilder, TempDir};
+use tokio::time::timeout;
 use types::EthSpec;
 use validator_client::ProductionValidatorClient;
 use validator_dir::insecure_keys::build_deterministic_validator_dirs;
@ -24,6 +25,8 @@ pub use validator_client::Config as ValidatorConfig;

 /// The global timeout for HTTP requests to the beacon node.
 const HTTP_TIMEOUT: Duration = Duration::from_secs(4);
+/// The timeout for a beacon node to start up.
+const STARTUP_TIMEOUT: Duration = Duration::from_secs(60);

 /// Provides a beacon node that is running in the current process on a given tokio executor (it
 /// is _local_ to this process).
@ -51,8 +54,12 @@ impl<E: EthSpec> LocalBeaconNode<E> {
         client_config.set_data_dir(datadir.path().into());
         client_config.network.network_dir = PathBuf::from(datadir.path()).join("network");

-        ProductionBeaconNode::new(context, client_config)
+        timeout(
+            STARTUP_TIMEOUT,
+            ProductionBeaconNode::new(context, client_config),
+        )
         .await
+        .map_err(|_| format!("Beacon node startup timed out after {:?}", STARTUP_TIMEOUT))?
         .map(move |client| Self {
             client: client.into_inner(),
             datadir,
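The startup change above is plain `tokio::time::timeout`: it races a future against a deadline and returns `Err(Elapsed)` if the deadline fires first. A self-contained sketch (assumes tokio with the `time`, `rt` and `macros` features; durations are illustrative):

    use std::time::Duration;
    use tokio::time::timeout;

    #[tokio::main]
    async fn main() {
        // Race a deliberately slow task against a 100ms deadline.
        let slow = tokio::time::sleep(Duration::from_secs(5));
        let result = timeout(Duration::from_millis(100), slow).await;
        // The deadline fires first, so the Err(Elapsed) branch is taken,
        // mirroring the "Beacon node startup timed out" mapping above.
        assert!(result.is_err());
    }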
@ -25,8 +25,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .default_value("4")
         .help("Number of beacon nodes"))
     .arg(Arg::with_name("proposer-nodes")
-        .short("n")
-        .long("nodes")
+        .short("p")
+        .long("proposer_nodes")
         .takes_value(true)
         .default_value("2")
         .help("Number of proposer-only beacon nodes"))
@ -64,8 +64,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .default_value("4")
         .help("Number of beacon nodes"))
     .arg(Arg::with_name("proposer-nodes")
-        .short("n")
-        .long("nodes")
+        .short("p")
+        .long("proposer_nodes")
         .takes_value(true)
         .default_value("2")
         .help("Number of proposer-only beacon nodes"))
@ -1,14 +1,16 @@
 use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY};
-use crate::{checks, LocalNetwork, E};
+use crate::{checks, LocalNetwork};
 use clap::ArgMatches;
 use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
 use eth1_test_rig::AnvilEth1Instance;

+use crate::retry::with_retry;
 use execution_layer::http::deposit_methods::Eth1Id;
 use futures::prelude::*;
+use node_test_rig::environment::RuntimeContext;
 use node_test_rig::{
     environment::{EnvironmentBuilder, LoggerConfig},
-    testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles,
+    testing_client_config, testing_validator_config, ClientConfig, ClientGenesis, ValidatorFiles,
 };
 use rayon::prelude::*;
 use sensitive_url::SensitiveUrl;
@ -107,71 +109,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
     let context = env.core_context();

     let main_future = async {
-        /*
-         * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
-         * validators.
-         */
-        let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
-        let deposit_contract = anvil_eth1_instance.deposit_contract;
-        let chain_id = anvil_eth1_instance.anvil.chain_id();
-        let anvil = anvil_eth1_instance.anvil;
-        let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str())
-            .expect("Unable to parse anvil endpoint.");
-        let deposit_contract_address = deposit_contract.address();
-
-        // Start a timer that produces eth1 blocks on an interval.
-        tokio::spawn(async move {
-            let mut interval = tokio::time::interval(eth1_block_time);
-            loop {
-                interval.tick().await;
-                let _ = anvil.evm_mine().await;
-            }
-        });
-
-        // Submit deposits to the deposit contract.
-        tokio::spawn(async move {
-            for i in 0..total_validator_count {
-                println!("Submitting deposit for validator {}...", i);
-                let _ = deposit_contract
-                    .deposit_deterministic_async::<E>(i, deposit_amount)
-                    .await;
-            }
-        });
-
-        let mut beacon_config = testing_client_config();
-
-        beacon_config.genesis = ClientGenesis::DepositContract;
-        beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
-        beacon_config.eth1.deposit_contract_address = deposit_contract_address;
-        beacon_config.eth1.deposit_contract_deploy_block = 0;
-        beacon_config.eth1.lowest_cached_block_number = 0;
-        beacon_config.eth1.follow_distance = 1;
-        beacon_config.eth1.node_far_behind_seconds = 20;
-        beacon_config.dummy_eth1_backend = false;
-        beacon_config.sync_eth1_chain = true;
-        beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
-        beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
-        beacon_config.network.target_peers = node_count + proposer_nodes - 1;
-
-        beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
-
-        if post_merge_sim {
-            let el_config = execution_layer::Config {
-                execution_endpoints: vec![SensitiveUrl::parse(&format!(
-                    "http://localhost:{}",
-                    EXECUTION_PORT
-                ))
-                .unwrap()],
-                ..Default::default()
-            };
-
-            beacon_config.execution_layer = Some(el_config);
-        }
-
         /*
          * Create a new `LocalNetwork` with one beacon node.
          */
-        let network = LocalNetwork::new(context.clone(), beacon_config.clone()).await?;
+        let max_retries = 3;
+        let (network, beacon_config) = with_retry(max_retries, || {
+            Box::pin(create_local_network(
+                LocalNetworkParams {
+                    eth1_block_time,
+                    total_validator_count,
+                    deposit_amount,
+                    node_count,
+                    proposer_nodes,
+                    post_merge_sim,
+                },
+                context.clone(),
+            ))
+        })
+        .await?;

         /*
          * One by one, add beacon nodes to the network.
@ -341,3 +296,88 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {

     Ok(())
 }
+
+struct LocalNetworkParams {
+    eth1_block_time: Duration,
+    total_validator_count: usize,
+    deposit_amount: u64,
+    node_count: usize,
+    proposer_nodes: usize,
+    post_merge_sim: bool,
+}
+
+async fn create_local_network<E: EthSpec>(
+    LocalNetworkParams {
+        eth1_block_time,
+        total_validator_count,
+        deposit_amount,
+        node_count,
+        proposer_nodes,
+        post_merge_sim,
+    }: LocalNetworkParams,
+    context: RuntimeContext<E>,
+) -> Result<(LocalNetwork<E>, ClientConfig), String> {
+    /*
+     * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit
+     * validators.
+     */
+    let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?;
+    let deposit_contract = anvil_eth1_instance.deposit_contract;
+    let chain_id = anvil_eth1_instance.anvil.chain_id();
+    let anvil = anvil_eth1_instance.anvil;
+    let eth1_endpoint =
+        SensitiveUrl::parse(anvil.endpoint().as_str()).expect("Unable to parse anvil endpoint.");
+    let deposit_contract_address = deposit_contract.address();
+
+    // Start a timer that produces eth1 blocks on an interval.
+    tokio::spawn(async move {
+        let mut interval = tokio::time::interval(eth1_block_time);
+        loop {
+            interval.tick().await;
+            let _ = anvil.evm_mine().await;
+        }
+    });
+
+    // Submit deposits to the deposit contract.
+    tokio::spawn(async move {
+        for i in 0..total_validator_count {
+            println!("Submitting deposit for validator {}...", i);
+            let _ = deposit_contract
+                .deposit_deterministic_async::<E>(i, deposit_amount)
+                .await;
+        }
+    });
+
+    let mut beacon_config = testing_client_config();
+
+    beacon_config.genesis = ClientGenesis::DepositContract;
+    beacon_config.eth1.endpoint = Eth1Endpoint::NoAuth(eth1_endpoint);
+    beacon_config.eth1.deposit_contract_address = deposit_contract_address;
+    beacon_config.eth1.deposit_contract_deploy_block = 0;
+    beacon_config.eth1.lowest_cached_block_number = 0;
+    beacon_config.eth1.follow_distance = 1;
+    beacon_config.eth1.node_far_behind_seconds = 20;
+    beacon_config.dummy_eth1_backend = false;
+    beacon_config.sync_eth1_chain = true;
+    beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64;
+    beacon_config.eth1.chain_id = Eth1Id::from(chain_id);
+    beacon_config.network.target_peers = node_count + proposer_nodes - 1;
+
+    beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None);
+
+    if post_merge_sim {
+        let el_config = execution_layer::Config {
+            execution_endpoints: vec![SensitiveUrl::parse(&format!(
+                "http://localhost:{}",
+                EXECUTION_PORT
+            ))
+            .unwrap()],
+            ..Default::default()
+        };
+
+        beacon_config.execution_layer = Some(el_config);
+    }
+
+    let network = LocalNetwork::new(context, beacon_config.clone()).await?;
+    Ok((network, beacon_config))
+}
@ -21,6 +21,7 @@ mod cli;
 mod eth1_sim;
 mod local_network;
 mod no_eth1_sim;
+mod retry;
 mod sync_sim;

 use cli::cli_app;
testing/simulator/src/retry.rs (new file, 63 lines)
@ -0,0 +1,63 @@
+use std::fmt::Debug;
+use std::future::Future;
+use std::pin::Pin;
+
+/// Executes the function with a specified number of retries if the function returns an error.
+/// Once it exceeds `max_retries` and still fails, the error is returned.
+pub async fn with_retry<T, E, F>(max_retries: usize, mut func: F) -> Result<T, E>
+where
+    F: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>>>>,
+    E: Debug,
+{
+    let mut retry_count = 0;
+    loop {
+        let result = Box::pin(func()).await;
+        if result.is_ok() || retry_count >= max_retries {
+            break result;
+        }
+        retry_count += 1;
+
+        if let Err(e) = result {
+            eprintln!(
+                "Operation failed with error {:?}, retrying {} of {}",
+                e, retry_count, max_retries
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::VecDeque;
+
+    async fn my_async_func(is_ok: bool) -> Result<(), ()> {
+        if is_ok {
+            Ok(())
+        } else {
+            Err(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_ok() {
+        let res = with_retry(3, || Box::pin(my_async_func(true))).await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_2nd_ok() {
+        let mut mock_results = VecDeque::from([false, true]);
+        let res = with_retry(3, || {
+            Box::pin(my_async_func(mock_results.pop_front().unwrap()))
+        })
+        .await;
+        assert!(res.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_with_retry_fail() {
+        let res = with_retry(3, || Box::pin(my_async_func(false))).await;
+        assert!(res.is_err());
+    }
+}
@ -21,7 +21,7 @@ types = { path = "../consensus/types" }
 eth2 = { path = "../common/eth2" }
 beacon_node = { path = "../beacon_node"}
 tokio = { version = "1.14.0", features = ["time"] }
-axum = "0.5.15"
+axum = "0.6.18"
 hyper = "0.14.20"
 serde = "1.0.116"
 serde_json = "1.0.58"
@ -5,7 +5,6 @@ use crate::config::Config as FullConfig;
 use crate::database::{self, PgPool};
 use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes};
 use axum::{
-    handler::Handler,
     http::{StatusCode, Uri},
     routing::get,
     Extension, Json, Router,
@ -104,7 +103,7 @@ pub fn start_server(
     }

     let app = routes
-        .fallback(route_not_found.into_service())
+        .fallback(route_not_found)
         .layer(Extension(pool))
         .layer(Extension(slots_per_epoch));
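The axum bump from 0.5 to 0.6 explains both removals in this hunk: in 0.6, `Router::fallback` takes a handler directly, so the `Handler::into_service` adapter (and its import) is obsolete. A minimal sketch against axum 0.6 (the handler body is illustrative):

    use axum::{
        http::{StatusCode, Uri},
        routing::get,
        Router,
    };

    async fn route_not_found(uri: Uri) -> (StatusCode, String) {
        (StatusCode::NOT_FOUND, format!("No route for {uri}"))
    }

    fn app() -> Router {
        // axum 0.6: pass the handler as-is; 0.5 required
        // `.fallback(route_not_found.into_service())`.
        Router::new()
            .route("/", get(|| async { "ok" }))
            .fallback(route_not_found)
    }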