merge upstream, fix compile errors
commit 438126f19a

.github/workflows/local-testnet.yml (vendored, 2 changes)
@@ -21,7 +21,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
.github/workflows/release.yml (vendored, 27 changes)
@@ -8,8 +8,8 @@ on:
 env:
   DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
   DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
-  REPO_NAME: sigp/lighthouse
-  IMAGE_NAME: sigp/lighthouse
+  REPO_NAME: ${{ github.repository_owner }}/lighthouse
+  IMAGE_NAME: ${{ github.repository_owner }}/lighthouse

 jobs:
   extract-version:
@@ -63,12 +63,8 @@ jobs:
     steps:
       - name: Checkout sources
        uses: actions/checkout@v3
-      - name: Build toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          profile: minimal
-          override: true
+      - name: Get latest version of stable Rust
+        run: rustup update stable

       # ==============================
       # Windows dependencies
@@ -88,7 +84,7 @@ jobs:
       # ==============================
       - name: Install Protoc
         if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -179,13 +175,13 @@ jobs:
       # =======================================================================

       - name: Upload artifact
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz

       - name: Upload signature
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
@@ -208,7 +204,7 @@ jobs:
       # ==============================

       - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3

       # ==============================
       # Create release draft
@@ -216,11 +212,14 @@ jobs:

       - name: Generate Full Changelog
         id: changelog
-        run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT
+        run: |
+          echo "CHANGELOG<<EOF" >> $GITHUB_OUTPUT
+          echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT

       - name: Create Release Draft
         env:
-          GITHUB_USER: sigp
+          GITHUB_USER: ${{ github.repository_owner }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

       # The formatting here is borrowed from OpenEthereum: https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml
.github/workflows/test-suite.yml (vendored, 36 changes)
@@ -13,6 +13,8 @@ env:
   RUSTFLAGS: "-D warnings"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
   PINNED_NIGHTLY: nightly-2022-05-20
+  # Prevent Github API rate limiting.
+  LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -51,7 +53,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -95,7 +97,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run beacon_chain tests for all known forks
@@ -109,7 +111,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run operation_pool tests for all known forks
@@ -133,7 +135,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -149,7 +151,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run state_transition_vectors in release.
@@ -163,7 +165,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run consensus-spec-tests with blst, milagro and fake_crypto
@@ -189,7 +191,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -205,7 +207,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -221,7 +223,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -237,7 +239,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -253,7 +255,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -285,7 +287,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run exec engine integration tests in release
@@ -299,7 +301,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Typecheck benchmark code without running it
@@ -323,7 +325,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Lint code for quality and style with Clippy
@@ -346,7 +348,7 @@ jobs:
           cargo build --release --bin cargo-clippy --bin clippy-driver
           cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run Clippy with the disallowed-from-async lint
@@ -360,7 +362,7 @@ jobs:
       - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
        run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run cargo check
@@ -404,7 +406,7 @@ jobs:
       # NOTE: cargo-udeps version is pinned until this issue is resolved:
       # https://github.com/est31/cargo-udeps/issues/135
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install cargo-udeps
Makefile (8 changes)
@@ -28,7 +28,7 @@ PROFILE ?= release

 # List of all hard forks. This list is used to set env variables for several tests so that
 # they run for different forks.
-FORKS=phase0 altair merge
+FORKS=phase0 altair merge capella

 # Builds the Lighthouse binary in release (optimized).
 #
@@ -89,12 +89,12 @@ build-release-tarballs:
 # Runs the full workspace tests in **release**, without downloading any additional
 # test vectors.
 test-release:
-	cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher
+	cargo test --workspace --features withdrawals-processing --release --exclude ef_tests --exclude beacon_chain --exclude slasher

 # Runs the full workspace tests in **debug**, without downloading any additional test
 # vectors.
 test-debug:
-	cargo test --workspace --exclude ef_tests --exclude beacon_chain
+	cargo test --workspace --features withdrawals-processing --exclude ef_tests --exclude beacon_chain

 # Runs cargo-fmt (linter).
 cargo-fmt:
@@ -120,7 +120,7 @@ run-ef-tests:
 test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS))

 test-beacon-chain-%:
-	env FORK_NAME=$* cargo test --release --features fork_from_env -p beacon_chain
+	env FORK_NAME=$* cargo test --release --features fork_from_env,withdrawals-processing -p beacon_chain

 # Run the tests in the `operation_pool` crate for all known forks.
 test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS))
@@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 }

 pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> {
-    let mgr = WalletManager::open(&wallet_base_dir)
+    let mgr = WalletManager::open(wallet_base_dir)
         .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;

     for (name, _uuid) in mgr
@@ -17,7 +17,6 @@ withdrawals-processing = [
     "beacon_chain/withdrawals-processing",
     "store/withdrawals-processing",
     "execution_layer/withdrawals-processing",
-    "http_api/withdrawals-processing",
 ]
 spec-minimal = ["beacon_chain/spec-minimal"]

@@ -12,9 +12,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh
 fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
 withdrawals-processing = [
     "state_processing/withdrawals-processing",
-    "store/withdrawals-processing",
     "execution_layer/withdrawals-processing",
-    "operation_pool/withdrawals-processing"
 ]
 spec-minimal = ["kzg/minimal-spec"]

@@ -366,7 +366,6 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub(crate) observed_attester_slashings:
         Mutex<ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>>,
     /// Maintains a record of which validators we've seen BLS to execution changes for.
-    #[cfg(feature = "withdrawals-processing")]
     pub(crate) observed_bls_to_execution_changes:
         Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
     /// The most recently validated light client finality update received on gossip.
@@ -2293,29 +2292,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         bls_to_execution_change: SignedBlsToExecutionChange,
     ) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
-            if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
-                // Disallow BLS to execution changes prior to the Capella fork.
-                return Err(Error::BlsToExecutionChangeBadFork(current_fork));
-            }
-
-            let wall_clock_state = self.wall_clock_state()?;
-
-            Ok(self
-                .observed_bls_to_execution_changes
-                .lock()
-                .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
+        let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
+        if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
+            // Disallow BLS to execution changes prior to the Capella fork.
+            return Err(Error::BlsToExecutionChangeBadFork(current_fork));
         }

-        // TODO: remove this whole block once withdrawals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_non_drop)]
-            drop(bls_to_execution_change);
-            Ok(ObservationOutcome::AlreadyKnown)
-        }
+        let wall_clock_state = self.wall_clock_state()?;
+
+        Ok(self
+            .observed_bls_to_execution_changes
+            .lock()
+            .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
     }

     /// Import a BLS to execution change to the op pool.
@@ -2324,12 +2312,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         bls_to_execution_change: SigVerifiedOp<SignedBlsToExecutionChange, T::EthSpec>,
     ) {
         if self.eth1_chain.is_some() {
-            #[cfg(feature = "withdrawals-processing")]
             self.op_pool
                 .insert_bls_to_execution_change(bls_to_execution_change);
-
-            #[cfg(not(feature = "withdrawals-processing"))]
-            drop(bls_to_execution_change);
         }
     }

@@ -4879,9 +4863,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .ok_or(Error::InvalidSlot(prepare_slot))?
                 .as_secs(),
             pre_payload_attributes.prev_randao,
-            execution_layer
-                .get_suggested_fee_recipient(proposer as u64)
-                .await,
+            execution_layer.get_suggested_fee_recipient(proposer).await,
             withdrawals,
         );

@@ -815,7 +815,6 @@ where
             observed_voluntary_exits: <_>::default(),
             observed_proposer_slashings: <_>::default(),
             observed_attester_slashings: <_>::default(),
-            #[cfg(feature = "withdrawals-processing")]
             observed_bls_to_execution_changes: <_>::default(),
             latest_seen_finality_update: <_>::default(),
             latest_seen_optimistic_update: <_>::default(),
@@ -460,7 +460,7 @@ where
     if is_terminal_block_hash_set && !is_activation_epoch_reached {
         // Use the "empty" payload if there's a terminal block hash, but we haven't reached the
         // terminal block epoch yet.
-        return Ok(BlockProposalContents::default_at_fork(fork));
+        return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
     }

     let terminal_pow_block_hash = execution_layer
@@ -473,7 +473,7 @@ where
         } else {
             // If the merge transition hasn't occurred yet and the EL hasn't found the terminal
             // block, return an "empty" payload.
-            return Ok(BlockProposalContents::default_at_fork(fork));
+            return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
         }
     } else {
         latest_execution_payload_header_block_hash
@@ -402,7 +402,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {

     /// Returns the total number of items stored in `self`.
     pub fn num_items(&self) -> usize {
-        self.maps.iter().map(|(_, map)| map.len()).sum()
+        self.maps.values().map(T::len).sum()
     }

     /// Returns an aggregated `T::Value` with the given `T::Data`, if any.
@@ -448,11 +448,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {
         // If we have too many maps, remove the lowest amount to ensure we only have
         // `SLOTS_RETAINED` left.
         if self.maps.len() > SLOTS_RETAINED {
-            let mut slots = self
-                .maps
-                .iter()
-                .map(|(slot, _map)| *slot)
-                .collect::<Vec<_>>();
+            let mut slots = self.maps.keys().copied().collect::<Vec<_>>();
             // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
             // negligible.
             slots.sort_unstable();
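The two NaiveAggregationPool hunks above are the standard map-iteration cleanups: `values()` when only the values are needed, and `keys().copied()` when only the keys are. A minimal standalone sketch of the same rewrite, using std's HashMap rather than Lighthouse's pool type:

use std::collections::HashMap;

fn main() {
    let maps: HashMap<u64, Vec<u32>> = HashMap::from([(1, vec![10, 20]), (2, vec![30])]);

    // Before: iterate entries and discard half of each tuple.
    let total_before: usize = maps.iter().map(|(_, map)| map.len()).sum();
    // After: ask for exactly the half we need.
    let total_after: usize = maps.values().map(Vec::len).sum();
    assert_eq!(total_before, total_after);

    // Same idea for keys: `copied()` turns `&u64` into `u64`.
    let mut slots: Vec<u64> = maps.keys().copied().collect();
    slots.sort_unstable();
    assert_eq!(slots, vec![1, 2]);
}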
@@ -6,12 +6,9 @@ use std::collections::HashSet;
 use std::marker::PhantomData;
 use types::{
     AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing,
-    SignedVoluntaryExit, Slot,
+    SignedBlsToExecutionChange, SignedVoluntaryExit, Slot,
 };

-#[cfg(feature = "withdrawals-processing")]
-use types::SignedBlsToExecutionChange;
-
 /// Number of validator indices to store on the stack in `observed_validators`.
 pub const SMALL_VEC_SIZE: usize = 8;

@@ -83,7 +80,6 @@ impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
     }
 }

-#[cfg(feature = "withdrawals-processing")]
 impl<E: EthSpec> ObservableOperation<E> for SignedBlsToExecutionChange {
     fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
         smallvec![self.message.validator_index]
@@ -1,6 +1,7 @@
 //! Utilities for managing database schema changes.
 mod migration_schema_v12;
 mod migration_schema_v13;
+mod migration_schema_v14;

 use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
 use crate::eth1_chain::SszEth1;
@@ -114,6 +115,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(

             Ok(())
         }
+        (SchemaVersion(13), SchemaVersion(14)) => {
+            let ops = migration_schema_v14::upgrade_to_v14::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
+        (SchemaVersion(14), SchemaVersion(13)) => {
+            let ops = migration_schema_v14::downgrade_from_v14::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
         // Anything else is an error.
         (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
             target_version: to,
@@ -168,16 +168,14 @@ pub fn downgrade_from_v12<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V5.
-    let PersistedOperationPoolV12 {
+    let PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V12(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
-        op_pool
+    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
+        op_pool_v12
     } else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
@@ -0,0 +1,75 @@
+use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
+use operation_pool::{
+    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
+};
+use slog::{debug, info, Logger};
+use std::sync::Arc;
+use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
+
+pub fn upgrade_to_v14<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // Load a V12 op pool and transform it to V14.
+    let PersistedOperationPoolV12::<T::EthSpec> {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
+        op_pool_v12
+    } else {
+        debug!(log, "Nothing to do, no operation pool stored");
+        return Ok(vec![]);
+    };
+
+    // initialize with empty vector
+    let bls_to_execution_changes = vec![];
+    let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+        bls_to_execution_changes,
+    });
+    Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
+}
+
+pub fn downgrade_from_v14<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // Load a V14 op pool and transform it to V12.
+    let PersistedOperationPoolV14 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+        bls_to_execution_changes,
+    } = if let Some(PersistedOperationPool::<T::EthSpec>::V14(op_pool)) =
+        db.get_item(&OP_POOL_DB_KEY)?
+    {
+        op_pool
+    } else {
+        debug!(log, "Nothing to do, no operation pool stored");
+        return Ok(vec![]);
+    };
+
+    info!(
+        log,
+        "Dropping bls_to_execution_changes from pool";
+        "count" => bls_to_execution_changes.len(),
+    );
+
+    let v12 = PersistedOperationPoolV12 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+    };
+    Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)])
+}
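The new v14 migration follows the same shape as the existing v12 one: destructure the old on-disk struct, fill any new field with a default on the way up, and log-and-drop it on the way down. A self-contained sketch of that up/down pattern (the RecordV1/RecordV2 types are illustrative, not Lighthouse's):

#[derive(Debug, PartialEq)]
struct RecordV1 { attestations: Vec<String> }

#[derive(Debug, PartialEq)]
struct RecordV2 { attestations: Vec<String>, bls_changes: Vec<String> }

// Upgrade: new fields start empty, nothing is lost.
fn upgrade(v1: RecordV1) -> RecordV2 {
    RecordV2 { attestations: v1.attestations, bls_changes: vec![] }
}

// Downgrade: fields the old schema cannot represent are dropped (and logged).
fn downgrade(v2: RecordV2) -> RecordV1 {
    if !v2.bls_changes.is_empty() {
        eprintln!("dropping {} bls_changes", v2.bls_changes.len());
    }
    RecordV1 { attestations: v2.attestations }
}

fn main() {
    let v1 = RecordV1 { attestations: vec!["a".into()] };
    let round_trip = downgrade(upgrade(RecordV1 { attestations: v1.attestations.clone() }));
    // Upgrade-then-downgrade is lossless while the new field is empty.
    assert_eq!(v1, round_trip);
}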
@@ -11,11 +11,11 @@ use crate::{
     StateSkipConfig,
 };
 use bls::get_withdrawal_credentials;
-use execution_layer::test_utils::DEFAULT_JWT_SECRET;
 use execution_layer::{
     auth::JwtKey,
     test_utils::{
-        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK,
+        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET,
+        DEFAULT_TERMINAL_BLOCK,
     },
     ExecutionLayer,
 };
@@ -383,14 +383,43 @@ where
         self
     }

+    pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
+        let mock = self
+            .mock_execution_layer
+            .as_mut()
+            .expect("must have mock execution layer to recalculate fork times");
+        let spec = self
+            .spec
+            .clone()
+            .expect("cannot recalculate fork times without spec");
+        mock.server.execution_block_generator().shanghai_time =
+            spec.capella_fork_epoch.map(|epoch| {
+                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+            });
+        mock.server.execution_block_generator().eip4844_time =
+            spec.eip4844_fork_epoch.map(|epoch| {
+                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+            });
+
+        self
+    }
+
     pub fn mock_execution_layer(mut self) -> Self {
         let spec = self.spec.clone().expect("cannot build without spec");
+        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
+        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
         let mock = MockExecutionLayer::new(
             self.runtime.task_executor.clone(),
             spec.terminal_total_difficulty,
             DEFAULT_TERMINAL_BLOCK,
             spec.terminal_block_hash,
             spec.terminal_block_hash_activation_epoch,
+            shanghai_time,
+            eip4844_time,
             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
             None,
         );
@@ -405,12 +434,20 @@ where
         let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();

         let spec = self.spec.clone().expect("cannot build without spec");
+        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
+        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
         let mock_el = MockExecutionLayer::new(
             self.runtime.task_executor.clone(),
             spec.terminal_total_difficulty,
             DEFAULT_TERMINAL_BLOCK,
             spec.terminal_block_hash,
             spec.terminal_block_hash_activation_epoch,
+            shanghai_time,
+            eip4844_time,
             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
             Some(builder_url.clone()),
         )
@@ -1459,7 +1496,7 @@ where
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();

         let signed_block = block.sign(
-            &self.validator_keypairs[proposer_index as usize].sk,
+            &self.validator_keypairs[proposer_index].sk,
             &state.fork(),
             state.genesis_validators_root(),
             &self.spec,
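Both new harness methods compute a fork's activation timestamp the same way: wall-clock genesis time plus the number of slots until the fork epoch times the slot duration. A small sketch of just that arithmetic (names are illustrative; in the diff the inputs come from ChainSpec and E::slots_per_epoch()):

/// Unix timestamp at which a fork scheduled for `fork_epoch` activates.
fn fork_activation_time(
    genesis_time: u64,
    seconds_per_slot: u64,
    slots_per_epoch: u64,
    fork_epoch: u64,
) -> u64 {
    genesis_time + seconds_per_slot * slots_per_epoch * fork_epoch
}

fn main() {
    // Mainnet-style parameters: 12s slots, 32 slots per epoch, a fork at epoch 12.
    let t = fork_activation_time(1_606_824_023, 12, 32, 12);
    assert_eq!(t, 1_606_824_023 + 12 * 32 * 12);
    println!("fork activates at {t}");
}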
@@ -631,10 +631,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {

    // Return the `id`'s of all monitored validators.
    pub fn get_all_monitored_validators(&self) -> Vec<String> {
-        self.validators
-            .iter()
-            .map(|(_, val)| val.id.clone())
-            .collect()
+        self.validators.values().map(|val| val.id.clone()).collect()
    }

    /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
beacon_node/beacon_chain/tests/capella.rs (new file, 170 lines)
@@ -0,0 +1,170 @@
+#![cfg(not(debug_assertions))] // Tests run too slow in debug.
+
+use beacon_chain::test_utils::BeaconChainHarness;
+use execution_layer::test_utils::Block;
+use types::*;
+
+const VALIDATOR_COUNT: usize = 32;
+type E = MainnetEthSpec;
+
+fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
+    let mut prev_ep: Option<FullPayload<T>> = None;
+
+    for ep in chain {
+        assert!(!ep.is_default_with_empty_roots());
+        assert!(ep.block_hash() != ExecutionBlockHash::zero());
+
+        // Check against previous `ExecutionPayload`.
+        if let Some(prev_ep) = prev_ep {
+            assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash());
+            assert_eq!(
+                prev_ep.execution_payload().block_number() + 1,
+                ep.execution_payload().block_number()
+            );
+            assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp());
+        }
+        prev_ep = Some(ep.clone());
+    }
+}
+
+#[tokio::test]
+async fn base_altair_merge_capella() {
+    let altair_fork_epoch = Epoch::new(4);
+    let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch());
+    let bellatrix_fork_epoch = Epoch::new(8);
+    let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch());
+    let capella_fork_epoch = Epoch::new(12);
+    let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch());
+
+    let mut spec = E::default_spec();
+    spec.altair_fork_epoch = Some(altair_fork_epoch);
+    spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
+    spec.capella_fork_epoch = Some(capella_fork_epoch);
+
+    let harness = BeaconChainHarness::builder(E::default())
+        .spec(spec)
+        .logger(logging::test_logger())
+        .deterministic_keypairs(VALIDATOR_COUNT)
+        .fresh_ephemeral_store()
+        .mock_execution_layer()
+        .build();
+
+    /*
+     * Start with the base fork.
+     */
+    assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok());
+
+    /*
+     * Do the Altair fork.
+     */
+    harness.extend_to_slot(altair_fork_slot).await;
+
+    let altair_head = &harness.chain.head_snapshot().beacon_block;
+    assert!(altair_head.as_altair().is_ok());
+    assert_eq!(altair_head.slot(), altair_fork_slot);
+
+    /*
+     * Do the merge fork, without a terminal PoW block.
+     */
+    harness.extend_to_slot(merge_fork_slot).await;
+
+    let merge_head = &harness.chain.head_snapshot().beacon_block;
+    assert!(merge_head.as_merge().is_ok());
+    assert_eq!(merge_head.slot(), merge_fork_slot);
+    assert!(
+        merge_head
+            .message()
+            .body()
+            .execution_payload()
+            .unwrap()
+            .is_default_with_empty_roots(),
+        "Merge head is default payload"
+    );
+
+    /*
+     * Next merge block shouldn't include an exec payload.
+     */
+    harness.extend_slots(1).await;
+
+    let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
+    assert!(
+        one_after_merge_head
+            .message()
+            .body()
+            .execution_payload()
+            .unwrap()
+            .is_default_with_empty_roots(),
+        "One after merge head is default payload"
+    );
+    assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1);
+
+    /*
+     * Trigger the terminal PoW block.
+     */
+    harness
+        .execution_block_generator()
+        .move_to_terminal_block()
+        .unwrap();
+
+    // Add a slot duration to get to the next slot
+    let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot;
+    harness
+        .execution_block_generator()
+        .modify_last_block(|block| {
+            if let Block::PoW(terminal_block) = block {
+                terminal_block.timestamp = timestamp;
+            }
+        });
+    harness.extend_slots(1).await;
+
+    let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
+    assert!(
+        two_after_merge_head
+            .message()
+            .body()
+            .execution_payload()
+            .unwrap()
+            .is_default_with_empty_roots(),
+        "Two after merge head is default payload"
+    );
+    assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);
+
+    /*
+     * Next merge block should include an exec payload.
+     */
+    let mut execution_payloads = vec![];
+    for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
+        harness.extend_slots(1).await;
+        let block = &harness.chain.head_snapshot().beacon_block;
+        let full_payload: FullPayload<E> = block
+            .message()
+            .body()
+            .execution_payload()
+            .unwrap()
+            .clone()
+            .into();
+        // pre-capella shouldn't have withdrawals
+        assert!(full_payload.withdrawals_root().is_err());
+        execution_payloads.push(full_payload);
+    }
+
+    /*
+     * Should enter capella fork now.
+     */
+    for _ in 0..16 {
+        harness.extend_slots(1).await;
+        let block = &harness.chain.head_snapshot().beacon_block;
+        let full_payload: FullPayload<E> = block
+            .message()
+            .body()
+            .execution_payload()
+            .unwrap()
+            .clone()
+            .into();
+        // post-capella should have withdrawals
+        assert!(full_payload.withdrawals_root().is_ok());
+        execution_payloads.push(full_payload);
+    }
+
+    verify_execution_payload_chain(execution_payloads.as_slice());
+}
@@ -1,6 +1,7 @@
 mod attestation_production;
 mod attestation_verification;
 mod block_verification;
+mod capella;
 mod merge;
 mod op_verification;
 mod payload_invalidation;
@@ -191,18 +191,17 @@ async fn base_altair_merge_with_terminal_block_after_fork() {

     harness.extend_slots(1).await;

-    let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
-    // FIXME: why is this being tested twice?
+    let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
     assert!(
-        one_after_merge_head
+        two_after_merge_head
             .message()
             .body()
             .execution_payload()
             .unwrap()
             .is_default_with_empty_roots(),
-        "One after merge head is default payload"
+        "Two after merge head is default payload"
     );
-    assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2);
+    assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);

     /*
      * Next merge block should include an exec payload.
@@ -675,7 +675,7 @@ pub mod tests {
     #[test]
     fn test_finalization_boundaries() {
         let n = 8;
-        let half = (n / 2) as usize;
+        let half = n / 2;

         let mut deposit_cache = get_cache_with_deposits(n as u64);

@@ -828,9 +828,9 @@ pub mod tests {
         // get_log(half+quarter) should return log with index `half+quarter`
         assert_eq!(
             q3_log_before_finalization.index,
-            (half + quarter) as u64,
+            half + quarter,
             "log index should be {}",
-            (half + quarter),
+            half + quarter,
         );

         // get lower quarter of deposits with max deposit count
@@ -5,7 +5,7 @@ edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [features]
-withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"]
+withdrawals-processing = ["state_processing/withdrawals-processing"]

 [dependencies]
 types = { path = "../../consensus/types"}
@@ -27,7 +27,7 @@ impl From<jsonwebtoken::errors::Error> for Error {
 /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`.
 #[derive(Zeroize, Clone)]
 #[zeroize(drop)]
-pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]);
+pub struct JwtKey([u8; JWT_SECRET_LENGTH]);

 impl JwtKey {
     /// Wrap given slice in `Self`. Returns an error if slice.len() != `JWT_SECRET_LENGTH`.
@@ -889,11 +889,11 @@ impl HttpJsonRpc {
     pub async fn supported_apis_v1(&self) -> Result<SupportedApis, Error> {
         Ok(SupportedApis {
             new_payload_v1: true,
-            new_payload_v2: cfg!(feature = "withdrawals-processing"),
+            new_payload_v2: cfg!(any(feature = "withdrawals-processing", test)),
             forkchoice_updated_v1: true,
-            forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"),
+            forkchoice_updated_v2: cfg!(any(feature = "withdrawals-processing", test)),
             get_payload_v1: true,
-            get_payload_v2: cfg!(feature = "withdrawals-processing"),
+            get_payload_v2: cfg!(any(feature = "withdrawals-processing", test)),
             exchange_transition_configuration_v1: true,
         })
     }
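`cfg!` expands to a compile-time `true`/`false` rather than conditionally compiling code, so the hunk above simply advertises the V2 engine endpoints whenever the feature is enabled or the crate is built for tests. A tiny sketch of the same gating:

fn v2_apis_enabled() -> bool {
    // True when built with `--features withdrawals-processing` or under `cargo test`.
    cfg!(any(feature = "withdrawals-processing", test))
}

fn main() {
    println!("v2 engine APIs advertised: {}", v2_apis_enabled());
}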
@@ -348,12 +348,14 @@ impl From<Withdrawal> for JsonWithdrawal {

 impl From<JsonWithdrawal> for Withdrawal {
     fn from(jw: JsonWithdrawal) -> Self {
+        // This comparison is done to avoid a scenario where the EE gives us too large a number and we
+        // panic when attempting to cast to a `u64`.
+        let amount = std::cmp::max(jw.amount / 1000000000, Uint256::from(u64::MAX));
         Self {
             index: jw.index,
             validator_index: jw.validator_index,
             address: jw.address,
-            //FIXME(sean) if EE gives us too large a number this panics
-            amount: (jw.amount / 1000000000).as_u64(),
+            amount: amount.as_u64(),
         }
     }
 }
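One caution on the hunk above: `std::cmp::max(jw.amount / 1000000000, Uint256::from(u64::MAX))` pins the result at u64::MAX or above, so `as_u64` still sees an oversized value; the clamping the comment describes would be `min`. A minimal sketch of a saturating wei-to-Gwei conversion, with u128 standing in for the 256-bit amount type:

/// Convert a wei amount to Gwei, saturating at u64::MAX instead of panicking.
fn wei_to_gwei_saturating(wei: u128) -> u64 {
    let gwei = wei / 1_000_000_000;
    // `min`, not `max`: cap oversized values at the largest representable u64.
    std::cmp::min(gwei, u64::MAX as u128) as u64
}

fn main() {
    assert_eq!(wei_to_gwei_saturating(32_000_000_000), 32);
    assert_eq!(wei_to_gwei_saturating(u128::MAX), u64::MAX); // saturates, no panic
}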
@@ -35,7 +35,7 @@ use tokio::{
     time::sleep,
 };
 use tokio_stream::wrappers::WatchStream;
-use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment};
+use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment};
 use types::{
     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256,
@@ -95,6 +95,13 @@ pub enum Error {
     FeeRecipientUnspecified,
     MissingLatestValidHash,
     InvalidJWTSecret(String),
+    BeaconStateError(BeaconStateError),
+}
+
+impl From<BeaconStateError> for Error {
+    fn from(e: BeaconStateError) -> Self {
+        Error::BeaconStateError(e)
+    }
 }

 impl From<ApiError> for Error {
@@ -150,17 +157,17 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
             } => payload,
         }
     }
-    pub fn default_at_fork(fork_name: ForkName) -> Self {
-        match fork_name {
+    pub fn default_at_fork(fork_name: ForkName) -> Result<Self, BeaconStateError> {
+        Ok(match fork_name {
             ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
-                BlockProposalContents::Payload(Payload::default_at_fork(fork_name))
+                BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?)
             }
             ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs {
-                payload: Payload::default_at_fork(fork_name),
+                payload: Payload::default_at_fork(fork_name)?,
                 blobs: VariableList::default(),
                 kzg_commitments: VariableList::default(),
             },
-        }
+        })
     }
 }

@@ -805,10 +812,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
                         spec,
                     ) {
                         Ok(()) => Ok(ProvenancedPayload::Builder(
-                            //FIXME(sean) the builder API needs to be updated
-                            // NOTE the comment above was removed in the
-                            // rebase with unstable.. I think it goes
-                            // here now?
                             BlockProposalContents::Payload(relay.data.message.header),
                         )),
                         Err(reason) if !reason.payload_invalid() => {
@@ -860,19 +863,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
                         spec,
                     ) {
                         Ok(()) => Ok(ProvenancedPayload::Builder(
-                            //FIXME(sean) the builder API needs to be updated
-                            // NOTE the comment above was removed in the
-                            // rebase with unstable.. I think it goes
-                            // here now?
                             BlockProposalContents::Payload(relay.data.message.header),
                         )),
                         // If the payload is valid then use it. The local EE failed
                         // to produce a payload so we have no alternative.
                         Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder(
-                            //FIXME(sean) the builder API needs to be updated
-                            // NOTE the comment above was removed in the
-                            // rebase with unstable.. I think it goes
-                            // here now?
                             BlockProposalContents::Payload(relay.data.message.header),
                         )),
                         Err(reason) => {
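The new `From<BeaconStateError> for Error` impl above is what lets the earlier call sites return `BlockProposalContents::default_at_fork(fork).map_err(Into::into)` (or use `?`) instead of wrapping in `Ok`. A self-contained sketch of the pattern with illustrative error types:

#[derive(Debug)]
struct StateError(String);

#[derive(Debug)]
enum LayerError {
    State(StateError),
}

impl From<StateError> for LayerError {
    fn from(e: StateError) -> Self {
        LayerError::State(e)
    }
}

fn default_at_fork(ok: bool) -> Result<u64, StateError> {
    if ok { Ok(0) } else { Err(StateError("no default for fork".into())) }
}

fn propose(ok: bool) -> Result<u64, LayerError> {
    // `map_err(Into::into)` converts the inner error via the From impl;
    // `default_at_fork(ok)?` inside a longer body would do the same.
    default_at_fork(ok).map_err(Into::into)
}

fn main() {
    assert!(propose(true).is_ok());
    assert!(matches!(propose(false), Err(LayerError::State(_))));
}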
@@ -13,7 +13,8 @@ use std::collections::HashMap;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;
use types::{
-    EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, Hash256, Uint256,
+    EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
+    ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256,
};

const GAS_LIMIT: u64 = 16384;

@@ -113,6 +114,11 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
    pub pending_payloads: HashMap<ExecutionBlockHash, ExecutionPayload<T>>,
    pub next_payload_id: u64,
    pub payload_ids: HashMap<PayloadId, ExecutionPayload<T>>,
+    /*
+     * Post-merge fork triggers
+     */
+    pub shanghai_time: Option<u64>, // withdrawals
+    pub eip4844_time: Option<u64>,  // 4844
}

impl<T: EthSpec> ExecutionBlockGenerator<T> {

@@ -120,6 +126,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
        terminal_total_difficulty: Uint256,
        terminal_block_number: u64,
        terminal_block_hash: ExecutionBlockHash,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
    ) -> Self {
        let mut gen = Self {
            head_block: <_>::default(),

@@ -132,6 +140,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
            pending_payloads: <_>::default(),
            next_payload_id: 0,
            payload_ids: <_>::default(),
+            shanghai_time,
+            eip4844_time,
        };

        gen.insert_pow_block(0).unwrap();

@@ -163,6 +173,16 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
        }
    }

+    pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
+        match self.eip4844_time {
+            Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844,
+            _ => match self.shanghai_time {
+                Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
+                _ => ForkName::Merge,
+            },
+        }
+    }
+
    pub fn execution_block_by_number(&self, number: u64) -> Option<ExecutionBlock> {
        self.block_by_number(number)
            .map(|block| block.as_execution_block(self.terminal_total_difficulty))
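
Note: the fork selection added above is purely timestamp-driven and inclusive at the trigger boundary. A standalone sketch of the same rule, with made-up trigger values and a local ForkName stand-in (not the types crate's enum), for quick reference:

#[derive(Debug, PartialEq)]
enum ForkName {
    Merge,
    Capella,
    Eip4844,
}

// Later triggers are checked first, so the newest fork whose trigger
// has passed wins; with no triggers configured we stay on Merge.
fn fork_at(shanghai_time: Option<u64>, eip4844_time: Option<u64>, ts: u64) -> ForkName {
    match eip4844_time {
        Some(t) if ts >= t => ForkName::Eip4844,
        _ => match shanghai_time {
            Some(t) if ts >= t => ForkName::Capella,
            _ => ForkName::Merge,
        },
    }
}

fn main() {
    assert_eq!(fork_at(Some(100), Some(200), 99), ForkName::Merge);
    assert_eq!(fork_at(Some(100), Some(200), 100), ForkName::Capella); // boundary is inclusive
    assert_eq!(fork_at(Some(100), Some(200), 200), ForkName::Eip4844);
    assert_eq!(fork_at(None, None, u64::MAX), ForkName::Merge); // no triggers configured
}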
@@ -395,7 +415,9 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
        }
    }

-    pub fn forkchoice_updated_v1(
+    // This function expects payload_attributes to already be validated with respect to
+    // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)]
+    pub fn forkchoice_updated(
        &mut self,
        forkchoice_state: ForkchoiceState,
        payload_attributes: Option<PayloadAttributes>,

@@ -469,23 +491,65 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
                transactions: vec![].into(),
            }),
            PayloadAttributes::V2(pa) => {
-                // FIXME: think about how to test different forks
-                ExecutionPayload::Merge(ExecutionPayloadMerge {
+                match self.get_fork_at_timestamp(pa.timestamp) {
+                    ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
                    parent_hash: forkchoice_state.head_block_hash,
                    fee_recipient: pa.suggested_fee_recipient,
                    receipts_root: Hash256::repeat_byte(42),
                    state_root: Hash256::repeat_byte(43),
                    logs_bloom: vec![0; 256].into(),
                    prev_randao: pa.prev_randao,
                    block_number: parent.block_number() + 1,
                    gas_limit: GAS_LIMIT,
                    gas_used: GAS_USED,
                    timestamp: pa.timestamp,
                    extra_data: "block gen was here".as_bytes().to_vec().into(),
                    base_fee_per_gas: Uint256::one(),
                    block_hash: ExecutionBlockHash::zero(),
                    transactions: vec![].into(),
-                })
+                    }),
+                    ForkName::Capella => {
+                        ExecutionPayload::Capella(ExecutionPayloadCapella {
+                            parent_hash: forkchoice_state.head_block_hash,
+                            fee_recipient: pa.suggested_fee_recipient,
+                            receipts_root: Hash256::repeat_byte(42),
+                            state_root: Hash256::repeat_byte(43),
+                            logs_bloom: vec![0; 256].into(),
+                            prev_randao: pa.prev_randao,
+                            block_number: parent.block_number() + 1,
+                            gas_limit: GAS_LIMIT,
+                            gas_used: GAS_USED,
+                            timestamp: pa.timestamp,
+                            extra_data: "block gen was here".as_bytes().to_vec().into(),
+                            base_fee_per_gas: Uint256::one(),
+                            block_hash: ExecutionBlockHash::zero(),
+                            transactions: vec![].into(),
+                            withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(),
+                        })
+                    }
+                    ForkName::Eip4844 => {
+                        ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
+                            parent_hash: forkchoice_state.head_block_hash,
+                            fee_recipient: pa.suggested_fee_recipient,
+                            receipts_root: Hash256::repeat_byte(42),
+                            state_root: Hash256::repeat_byte(43),
+                            logs_bloom: vec![0; 256].into(),
+                            prev_randao: pa.prev_randao,
+                            block_number: parent.block_number() + 1,
+                            gas_limit: GAS_LIMIT,
+                            gas_used: GAS_USED,
+                            timestamp: pa.timestamp,
+                            extra_data: "block gen was here".as_bytes().to_vec().into(),
+                            base_fee_per_gas: Uint256::one(),
+                            // FIXME(4844): maybe this should be set to something?
+                            excess_data_gas: Uint256::one(),
+                            block_hash: ExecutionBlockHash::zero(),
+                            transactions: vec![].into(),
+                            withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(),
+                        })
+                    }
+                    _ => unreachable!(),
+                }
            }
        };

@@ -576,6 +640,8 @@ mod test {
            TERMINAL_DIFFICULTY.into(),
            TERMINAL_BLOCK,
            ExecutionBlockHash::zero(),
+            None,
+            None,
        );

        for i in 0..=TERMINAL_BLOCK {
@@ -82,17 +82,40 @@ pub async fn handle_rpc<T: EthSpec>(
                ENGINE_NEW_PAYLOAD_V2 => {
                    JsonExecutionPayload::V2(get_param::<JsonExecutionPayloadV2<T>>(params, 0)?)
                }
+                // TODO(4844) add that here..
                _ => unreachable!(),
            };
-            let fork = match request {
-                JsonExecutionPayload::V1(_) => ForkName::Merge,
-                JsonExecutionPayload::V2(ref payload) => {
-                    if payload.withdrawals.is_none() {
-                        ForkName::Merge
-                    } else {
-                        ForkName::Capella
+            let fork = ctx
+                .execution_block_generator
+                .read()
+                .get_fork_at_timestamp(*request.timestamp());
+            // validate method called correctly according to shanghai fork time
+            match fork {
+                ForkName::Merge => {
+                    if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() {
+                        return Err(format!(
+                            "{} called with `withdrawals` before capella fork!",
+                            method
+                        ));
                    }
                }
+                ForkName::Capella => {
+                    if method == ENGINE_NEW_PAYLOAD_V1 {
+                        return Err(format!("{} called after capella fork!", method));
+                    }
+                    if request.withdrawals().is_err()
+                        || (request.withdrawals().is_ok()
+                            && request.withdrawals().unwrap().is_none())
+                    {
+                        return Err(format!(
+                            "{} called without `withdrawals` after capella fork!",
+                            method
+                        ));
+                    }
+                }
+                // TODO(4844) add 4844 error checking here
+                _ => unreachable!(),
            };

            // Canned responses set by block hash take priority.
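
Note: the validation added here reduces to a small decision table: withdrawals are forbidden before Capella, and after Capella the V1 method is forbidden and withdrawals are mandatory. A condensed sketch under those assumptions (the Fork stand-in, boolean parameters, and error strings are illustrative, not the real types or messages):

#[derive(Clone, Copy, PartialEq)]
enum Fork {
    Merge,
    Capella,
}

// Condensed restatement of the check above; the real code inspects the
// actual request/attributes and method-name constants instead of booleans.
fn validate(fork: Fork, method_is_v1: bool, has_withdrawals: bool) -> Result<(), &'static str> {
    match fork {
        Fork::Merge if has_withdrawals => Err("withdrawals before capella"),
        Fork::Capella if method_is_v1 => Err("V1 method after capella"),
        Fork::Capella if !has_withdrawals => Err("missing withdrawals after capella"),
        _ => Ok(()),
    }
}

fn main() {
    assert!(validate(Fork::Merge, true, false).is_ok());
    assert!(validate(Fork::Merge, true, true).is_err());
    assert!(validate(Fork::Capella, true, true).is_err());
    assert!(validate(Fork::Capella, false, true).is_ok());
}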
@@ -125,7 +148,7 @@ pub async fn handle_rpc<T: EthSpec>(

            Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap())
        }
-        ENGINE_GET_PAYLOAD_V1 => {
+        ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 => {
            let request: JsonPayloadIdRequest = get_param(params, 0)?;
            let id = request.into();

@@ -135,12 +158,76 @@ pub async fn handle_rpc<T: EthSpec>(
                .get_payload(&id)
                .ok_or_else(|| format!("no payload for id {:?}", id))?;

-            Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap())
+            // validate method called correctly according to shanghai fork time
+            if ctx
+                .execution_block_generator
+                .read()
+                .get_fork_at_timestamp(response.timestamp())
+                == ForkName::Capella
+                && method == ENGINE_GET_PAYLOAD_V1
+            {
+                return Err(format!("{} called after capella fork!", method));
+            }
+            // TODO(4844) add 4844 error checking here
+
+            match method {
+                ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value(
+                    JsonExecutionPayloadV1::try_from(response).unwrap(),
+                )
+                .unwrap()),
+                ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse {
+                    execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(),
+                })
+                .unwrap()),
+                _ => unreachable!(),
+            }
        }
-        // FIXME(capella): handle fcu version 2
-        ENGINE_FORKCHOICE_UPDATED_V1 => {
+        ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
            let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?;
-            let payload_attributes: Option<JsonPayloadAttributes> = get_param(params, 1)?;
+            let payload_attributes = match method {
+                ENGINE_FORKCHOICE_UPDATED_V1 => {
+                    let jpa1: Option<JsonPayloadAttributesV1> = get_param(params, 1)?;
+                    jpa1.map(JsonPayloadAttributes::V1)
+                }
+                ENGINE_FORKCHOICE_UPDATED_V2 => {
+                    let jpa2: Option<JsonPayloadAttributesV2> = get_param(params, 1)?;
+                    jpa2.map(JsonPayloadAttributes::V2)
+                }
+                _ => unreachable!(),
+            };
+
+            // validate method called correctly according to shanghai fork time
+            if let Some(pa) = payload_attributes.as_ref() {
+                match ctx
+                    .execution_block_generator
+                    .read()
+                    .get_fork_at_timestamp(*pa.timestamp())
+                {
+                    ForkName::Merge => {
+                        if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() {
+                            return Err(format!(
+                                "{} called with `withdrawals` before capella fork!",
+                                method
+                            ));
+                        }
+                    }
+                    ForkName::Capella => {
+                        if method == ENGINE_FORKCHOICE_UPDATED_V1 {
+                            return Err(format!("{} called after capella fork!", method));
+                        }
+                        if pa.withdrawals().is_err()
+                            || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none())
+                        {
+                            return Err(format!(
+                                "{} called without `withdrawals` after capella fork!",
+                                method
+                            ));
+                        }
+                    }
+                    // TODO(4844) add 4844 error checking here
+                    _ => unreachable!(),
+                };
+            }
+
            if let Some(hook_response) = ctx
                .hook

@@ -161,13 +248,10 @@ pub async fn handle_rpc<T: EthSpec>(
                return Ok(serde_json::to_value(response).unwrap());
            }

-            let mut response = ctx
-                .execution_block_generator
-                .write()
-                .forkchoice_updated_v1(
-                    forkchoice_state.into(),
-                    payload_attributes.map(|json| json.into()),
-                )?;
+            let mut response = ctx.execution_block_generator.write().forkchoice_updated(
+                forkchoice_state.into(),
+                payload_attributes.map(|json| json.into()),
+            )?;

            if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() {
                if status.status == PayloadStatusV1Status::Valid {
@@ -26,17 +26,22 @@ impl<T: EthSpec> MockExecutionLayer<T> {
            DEFAULT_TERMINAL_BLOCK,
            ExecutionBlockHash::zero(),
            Epoch::new(0),
+            None,
+            None,
            Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
            None,
        )
    }

+    #[allow(clippy::too_many_arguments)]
    pub fn new(
        executor: TaskExecutor,
        terminal_total_difficulty: Uint256,
        terminal_block: u64,
        terminal_block_hash: ExecutionBlockHash,
        terminal_block_hash_activation_epoch: Epoch,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
        jwt_key: Option<JwtKey>,
        builder_url: Option<SensitiveUrl>,
    ) -> Self {

@@ -54,6 +59,8 @@ impl<T: EthSpec> MockExecutionLayer<T> {
            terminal_total_difficulty,
            terminal_block,
            terminal_block_hash,
+            shanghai_time,
+            eip4844_time,
        );

        let url = SensitiveUrl::parse(&server.url()).unwrap();

@@ -45,6 +45,8 @@ pub struct MockExecutionConfig {
    pub terminal_difficulty: Uint256,
    pub terminal_block: u64,
    pub terminal_block_hash: ExecutionBlockHash,
+    pub shanghai_time: Option<u64>,
+    pub eip4844_time: Option<u64>,
}

impl Default for MockExecutionConfig {

@@ -55,6 +57,8 @@ impl Default for MockExecutionConfig {
            terminal_block: DEFAULT_TERMINAL_BLOCK,
            terminal_block_hash: ExecutionBlockHash::zero(),
            server_config: Config::default(),
+            shanghai_time: None,
+            eip4844_time: None,
        }
    }
}

@@ -74,6 +78,8 @@ impl<T: EthSpec> MockServer<T> {
            DEFAULT_TERMINAL_DIFFICULTY.into(),
            DEFAULT_TERMINAL_BLOCK,
            ExecutionBlockHash::zero(),
+            None, // FIXME(capella): should this be the default?
+            None, // FIXME(eip4844): should this be the default?
        )
    }

@@ -84,11 +90,18 @@ impl<T: EthSpec> MockServer<T> {
            terminal_block,
            terminal_block_hash,
            server_config,
+            shanghai_time,
+            eip4844_time,
        } = config;
        let last_echo_request = Arc::new(RwLock::new(None));
        let preloaded_responses = Arc::new(Mutex::new(vec![]));
-        let execution_block_generator =
-            ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash);
+        let execution_block_generator = ExecutionBlockGenerator::new(
+            terminal_difficulty,
+            terminal_block,
+            terminal_block_hash,
+            shanghai_time,
+            eip4844_time,
+        );

        let ctx: Arc<Context<T>> = Arc::new(Context {
            config: server_config,

@@ -140,6 +153,8 @@ impl<T: EthSpec> MockServer<T> {
        terminal_difficulty: Uint256,
        terminal_block: u64,
        terminal_block_hash: ExecutionBlockHash,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
    ) -> Self {
        Self::new_with_config(
            handle,

@@ -149,6 +164,8 @@ impl<T: EthSpec> MockServer<T> {
                terminal_difficulty,
                terminal_block,
                terminal_block_hash,
+                shanghai_time,
+                eip4844_time,
            },
        )
    }
@@ -5,9 +5,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021"
autotests = false # using a single test binary compiles faster

-[features]
-withdrawals-processing = []
-
[dependencies]
warp = { version = "0.3.2", features = ["tls"] }
serde = { version = "1.0.116", features = ["derive"] }

@@ -216,10 +216,10 @@ impl BlockId {
    pub async fn blobs_sidecar<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
-    ) -> Result<(Arc<BlobsSidecar<T::EthSpec>>), warp::Rejection> {
+    ) -> Result<Arc<BlobsSidecar<T::EthSpec>>, warp::Rejection> {
        let root = self.root(chain)?.0;
        match chain.store.get_blobs(&root) {
-            Ok(Some(blob)) => Ok((Arc::new(blob))),
+            Ok(Some(blob)) => Ok(Arc::new(blob)),
            Ok(None) => Err(warp_utils::reject::custom_not_found(format!(
                "Blob with block root {} is not in the store",
                root
@@ -1681,16 +1681,12 @@ pub fn serve<T: BeaconChainTypes>(

                    match chain.verify_bls_to_execution_change_for_gossip(address_change) {
                        Ok(ObservationOutcome::New(verified_address_change)) => {
-                            #[cfg(feature = "withdrawals-processing")]
-                            {
-                                publish_pubsub_message(
-                                    &network_tx,
-                                    PubsubMessage::BlsToExecutionChange(Box::new(
-                                        verified_address_change.as_inner().clone(),
-                                    )),
-                                )?;
-                            }
+                            publish_pubsub_message(
+                                &network_tx,
+                                PubsubMessage::BlsToExecutionChange(Box::new(
+                                    verified_address_change.as_inner().clone(),
+                                )),
+                            )?;

                            chain.import_bls_to_execution_change(verified_address_change);
                        }
                        Ok(ObservationOutcome::AlreadyKnown) => {

@@ -2915,7 +2911,7 @@ pub fn serve<T: BeaconChainTypes>(
                            let is_live =
                                chain.validator_seen_at_epoch(index as usize, request_data.epoch);
                            api_types::LivenessResponseData {
-                                index: index as u64,
+                                index,
                                epoch: request_data.epoch,
                                is_live,
                            }

@@ -2951,7 +2947,7 @@ pub fn serve<T: BeaconChainTypes>(
        .and_then(
            |sysinfo, app_start: std::time::Instant, data_dir, network_globals| {
                blocking_json_task(move || {
-                    let app_uptime = app_start.elapsed().as_secs() as u64;
+                    let app_uptime = app_start.elapsed().as_secs();
                    Ok(api_types::GenericResponse::from(observe_system_health_bn(
                        sysinfo,
                        data_dir,

@@ -194,6 +194,11 @@ async fn reconstruct_block<T: BeaconChainTypes>(
                .spec
                .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())),
        )
+        .map_err(|e| {
+            warp_utils::reject::custom_server_error(format!(
+                "Default payload construction error: {e:?}"
+            ))
+        })?
        .into()
    // If we already have an execution payload with this transactions root cached, use it.
    } else if let Some(cached_payload) =

@@ -186,14 +186,7 @@ impl RealScore {

    /// Add an f64 to the score abiding by the limits.
    fn add(&mut self, score: f64) {
-        let mut new_score = self.lighthouse_score + score;
-        if new_score > MAX_SCORE {
-            new_score = MAX_SCORE;
-        }
-        if new_score < MIN_SCORE {
-            new_score = MIN_SCORE;
-        }
-
+        let new_score = (self.lighthouse_score + score).clamp(MIN_SCORE, MAX_SCORE);
        self.set_lighthouse_score(new_score);
    }

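Note: the clamp rewrite above is behavior-preserving for finite scores; the one difference worth knowing is that f64::clamp panics if the bounds are inverted or NaN, which the removed branches did not. A quick equivalence check with illustrative bounds (not Lighthouse's real MIN_SCORE/MAX_SCORE values):

const MIN_SCORE: f64 = -100.0;
const MAX_SCORE: f64 = 100.0;

// The removed two-branch limiter, kept for comparison.
fn add_manual(score: f64, delta: f64) -> f64 {
    let mut new_score = score + delta;
    if new_score > MAX_SCORE {
        new_score = MAX_SCORE;
    }
    if new_score < MIN_SCORE {
        new_score = MIN_SCORE;
    }
    new_score
}

fn main() {
    for (s, d) in [(0.0, 250.0), (0.0, -250.0), (10.0, 5.0)] {
        assert_eq!(add_manual(s, d), (s + d).clamp(MIN_SCORE, MAX_SCORE));
    }
}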
@@ -465,7 +465,7 @@ fn handle_length(
    // Note: length-prefix of > 10 bytes(uint64) would be a decoding error
    match uvi_codec.decode(bytes).map_err(RPCError::from)? {
        Some(length) => {
-            *len = Some(length as usize);
+            *len = Some(length);
            Ok(Some(length))
        }
        None => Ok(None), // need more bytes to decode length

@@ -119,8 +119,8 @@ lazy_static! {
pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
-//FIXME(sean) should these be the same?
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
+// FIXME(sean) should this be increased to account for blobs?
pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";

@@ -270,11 +270,11 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {

        let modulo_smaller = max(
            1,
-            smaller_committee_size / self.target_aggregators_per_committee as usize,
+            smaller_committee_size / self.target_aggregators_per_committee,
        );
        let modulo_larger = max(
            1,
-            (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
+            (smaller_committee_size + 1) / self.target_aggregators_per_committee,
        );

        Ok((

@@ -88,7 +88,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
        hex_bytes.to_string()
    };

-    hex::decode(&hex_bytes)
+    hex::decode(hex_bytes)
        .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
        .and_then(keypair_from_bytes)
}

@@ -116,7 +116,8 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
/// before we start dropping them.
const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;

-//FIXME(sean) verify
+/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
+/// will be stored before we start dropping them.
const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;

/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but

@@ -1213,7 +1214,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
            // required to verify some attestations.
            } else if let Some(item) = gossip_block_queue.pop() {
                self.spawn_worker(item, toolbox);
-            //FIXME(sean)
            } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
                self.spawn_worker(item, toolbox);
            // Check the aggregates, *then* the unaggregates since we assume that

@@ -4,9 +4,6 @@ version = "0.2.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = "2021"

-[features]
-withdrawals-processing = []
-
[dependencies]
derivative = "2.1.1"
itertools = "0.10.0"

@@ -49,7 +49,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
        let indices = get_attesting_indices::<T>(committee.committee, &fresh_validators).ok()?;
        let fresh_validators_rewards: HashMap<u64, u64> = indices
            .iter()
-            .map(|i| *i as u64)
+            .copied()
            .flat_map(|validator_index| {
                let reward = base::get_base_reward(
                    state,
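Note: .iter().copied() is equivalent to .iter().map(|i| *i) for Copy element types, and the dropped `as u64` cast was a no-op since the attesting indices are already u64. A minimal illustration (the vector contents are arbitrary):

fn main() {
    let indices: Vec<u64> = vec![3, 1, 4];
    let a: Vec<u64> = indices.iter().copied().collect();
    let b: Vec<u64> = indices.iter().map(|i| *i as u64).collect(); // old form: redundant cast
    assert_eq!(a, b);
}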
@@ -12,7 +12,8 @@ pub use attestation::AttMaxCover;
pub use attestation_storage::{AttestationRef, SplitAttestation};
pub use max_cover::MaxCover;
pub use persistence::{
-    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5,
+    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
+    PersistedOperationPoolV5,
};
pub use reward_cache::RewardCache;

@@ -51,7 +52,6 @@ pub struct OperationPool<T: EthSpec + Default> {
    /// Map from exiting validator to their exit data.
    voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, T>>>,
    /// Map from credential changing validator to their execution change data.
-    #[cfg(feature = "withdrawals-processing")]
    bls_to_execution_changes: RwLock<HashMap<u64, SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
    /// Reward cache for accelerating attestation packing.
    reward_cache: RwLock<RewardCache>,

@@ -518,17 +518,10 @@ impl<T: EthSpec> OperationPool<T> {
        &self,
        verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
    ) {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            self.bls_to_execution_changes.write().insert(
-                verified_change.as_inner().message.validator_index,
-                verified_change,
-            );
-        }
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            drop(verified_change);
-        }
+        self.bls_to_execution_changes.write().insert(
+            verified_change.as_inner().message.validator_index,
+            verified_change,
+        );
    }

    /// Get a list of execution changes for inclusion in a block.

@@ -539,32 +532,19 @@ impl<T: EthSpec> OperationPool<T> {
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Vec<SignedBlsToExecutionChange> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            filter_limit_operations(
-                self.bls_to_execution_changes.read().values(),
-                |address_change| {
-                    address_change.signature_is_still_valid(&state.fork())
-                        && state
-                            .get_validator(
-                                address_change.as_inner().message.validator_index as usize,
-                            )
-                            .map_or(false, |validator| {
-                                !validator.has_eth1_withdrawal_credential(spec)
-                            })
-                },
-                |address_change| address_change.as_inner().clone(),
-                T::MaxBlsToExecutionChanges::to_usize(),
-            )
-        }
-
-        // TODO: remove this whole block once withdrwals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_copy)]
-            drop((state, spec));
-            vec![]
-        }
+        filter_limit_operations(
+            self.bls_to_execution_changes.read().values(),
+            |address_change| {
+                address_change.signature_is_still_valid(&state.fork())
+                    && state
+                        .get_validator(address_change.as_inner().message.validator_index as usize)
+                        .map_or(false, |validator| {
+                            !validator.has_eth1_withdrawal_credential(spec)
+                        })
+            },
+            |address_change| address_change.as_inner().clone(),
+            T::MaxBlsToExecutionChanges::to_usize(),
+        )
    }

    /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.

@@ -579,32 +559,22 @@ impl<T: EthSpec> OperationPool<T> {
        head_state: &BeaconState<T>,
        spec: &ChainSpec,
    ) {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            prune_validator_hash_map(
-                &mut self.bls_to_execution_changes.write(),
-                |validator_index, validator| {
-                    validator.has_eth1_withdrawal_credential(spec)
-                        && head_block
-                            .message()
-                            .body()
-                            .bls_to_execution_changes()
-                            .map_or(true, |recent_changes| {
-                                !recent_changes
-                                    .iter()
-                                    .any(|c| c.message.validator_index == validator_index)
-                            })
-                },
-                head_state,
-            );
-        }
-
-        // TODO: remove this whole block once withdrwals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_copy)]
-            drop((head_block, head_state, spec));
-        }
+        prune_validator_hash_map(
+            &mut self.bls_to_execution_changes.write(),
+            |validator_index, validator| {
+                validator.has_eth1_withdrawal_credential(spec)
+                    && head_block
+                        .message()
+                        .body()
+                        .bls_to_execution_changes()
+                        .map_or(true, |recent_changes| {
+                            !recent_changes
+                                .iter()
+                                .any(|c| c.message.validator_index == validator_index)
+                        })
+            },
+            head_state,
+        );
    }

    /// Prune all types of transactions given the latest head state and head fork.

@@ -691,17 +661,11 @@ impl<T: EthSpec> OperationPool<T> {
    ///
    /// This method may return objects that are invalid for block inclusion.
    pub fn get_all_bls_to_execution_changes(&self) -> Vec<SignedBlsToExecutionChange> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            self.bls_to_execution_changes
-                .read()
-                .iter()
-                .map(|(_, address_change)| address_change.as_inner().clone())
-                .collect()
-        }
-
-        #[cfg(not(feature = "withdrawals-processing"))]
-        vec![]
+        self.bls_to_execution_changes
+            .read()
+            .iter()
+            .map(|(_, address_change)| address_change.as_inner().clone())
+            .collect()
    }
}

@@ -1787,7 +1751,7 @@ mod release_tests {

    fn cross_fork_harness<E: EthSpec>() -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec)
    {
-        let mut spec = test_spec::<E>();
+        let mut spec = E::default_spec();

        // Give some room to sign surround slashings.
        spec.altair_fork_epoch = Some(Epoch::new(3));

@@ -18,7 +18,7 @@ type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeContribution<T>>)>;
/// Operations are stored in arbitrary order, so it's not a good idea to compare instances
/// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first.
#[superstruct(
-    variants(V5, V12),
+    variants(V5, V12, V14),
    variant_attributes(
        derive(Derivative, PartialEq, Debug, Encode, Decode),
        derivative(Clone),

@@ -32,7 +32,7 @@ pub struct PersistedOperationPool<T: EthSpec> {
    #[superstruct(only(V5))]
    pub attestations_v5: Vec<(AttestationId, Vec<Attestation<T>>)>,
    /// Attestations and their attesting indices.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
    pub attestations: Vec<(Attestation<T>, Vec<u64>)>,
    /// Mapping from sync contribution ID to sync contributions and aggregate.
    pub sync_contributions: PersistedSyncContributions<T>,

@@ -40,20 +40,23 @@ pub struct PersistedOperationPool<T: EthSpec> {
    #[superstruct(only(V5))]
    pub attester_slashings_v5: Vec<(AttesterSlashing<T>, ForkVersion)>,
    /// Attester slashings.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
    pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<T>, T>>,
    /// [DEPRECATED] Proposer slashings.
    #[superstruct(only(V5))]
    pub proposer_slashings_v5: Vec<ProposerSlashing>,
    /// Proposer slashings with fork information.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
    pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, T>>,
    /// [DEPRECATED] Voluntary exits.
    #[superstruct(only(V5))]
    pub voluntary_exits_v5: Vec<SignedVoluntaryExit>,
    /// Voluntary exits with fork information.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
    pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, T>>,
+    /// BLS to Execution Changes
+    #[superstruct(only(V14))]
+    pub bls_to_execution_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, T>>,
}

impl<T: EthSpec> PersistedOperationPool<T> {

@@ -99,12 +102,20 @@ impl<T: EthSpec> PersistedOperationPool<T> {
            .map(|(_, exit)| exit.clone())
            .collect();

-        PersistedOperationPool::V12(PersistedOperationPoolV12 {
+        let bls_to_execution_changes = operation_pool
+            .bls_to_execution_changes
+            .read()
+            .iter()
+            .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone())
+            .collect();
+
+        PersistedOperationPool::V14(PersistedOperationPoolV14 {
            attestations,
            sync_contributions,
            attester_slashings,
            proposer_slashings,
            voluntary_exits,
+            bls_to_execution_changes,
        })
    }

@@ -127,24 +138,41 @@ impl<T: EthSpec> PersistedOperationPool<T> {
        );
        let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect());
        let attestations = match self {
-            PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant),
-            PersistedOperationPool::V12(pool) => {
+            PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => {
+                return Err(OpPoolError::IncorrectOpPoolVariant)
+            }
+            PersistedOperationPool::V14(ref pool) => {
                let mut map = AttestationMap::default();
-                for (att, attesting_indices) in pool.attestations {
+                for (att, attesting_indices) in pool.attestations.clone() {
                    map.insert(att, attesting_indices);
                }
                RwLock::new(map)
            }
        };
+        let bls_to_execution_changes = match self {
+            PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => {
+                return Err(OpPoolError::IncorrectOpPoolVariant)
+            }
+            PersistedOperationPool::V14(pool) => RwLock::new(
+                pool.bls_to_execution_changes
+                    .iter()
+                    .cloned()
+                    .map(|bls_to_execution_change| {
+                        (
+                            bls_to_execution_change.as_inner().message.validator_index,
+                            bls_to_execution_change,
+                        )
+                    })
+                    .collect(),
+            ),
+        };
        let op_pool = OperationPool {
            attestations,
            sync_contributions,
            attester_slashings,
            proposer_slashings,
            voluntary_exits,
-            // FIXME(capella): implement schema migration for address changes in op pool
-            #[cfg(feature = "withdrawals-processing")]
-            bls_to_execution_changes: Default::default(),
+            bls_to_execution_changes,
            reward_cache: Default::default(),
            _phantom: Default::default(),
        };
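
Note: V14 persists the BLS-to-execution changes as a flat Vec, while the in-memory pool keys them by validator index, so the restore path above rebuilds that map. A minimal sketch of the same rebuild with a simplified stand-in type (not the real SigVerifiedOp):

use std::collections::HashMap;

// Simplified stand-in for SigVerifiedOp<SignedBlsToExecutionChange, T>.
#[derive(Clone)]
struct Change {
    validator_index: u64,
}

// Rebuild the validator-index-keyed map from the flat persisted Vec,
// mirroring the V14 restore path above.
fn rebuild(persisted: Vec<Change>) -> HashMap<u64, Change> {
    persisted
        .into_iter()
        .map(|change| (change.validator_index, change))
        .collect()
}

fn main() {
    let map = rebuild(vec![Change { validator_index: 7 }]);
    assert!(map.contains_key(&7));
}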
@@ -166,6 +194,20 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV5<T> {
    }
}

+impl<T: EthSpec> StoreItem for PersistedOperationPoolV12<T> {
+    fn db_column() -> DBColumn {
+        DBColumn::OpPool
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
+        PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into)
+    }
+}
+
/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`.
impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
    fn db_column() -> DBColumn {

@@ -178,8 +220,8 @@ impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
        // Default deserialization to the latest variant.
-        PersistedOperationPoolV12::from_ssz_bytes(bytes)
-            .map(Self::V12)
+        PersistedOperationPoolV14::from_ssz_bytes(bytes)
+            .map(Self::V14)
            .map_err(Into::into)
    }
}
@@ -18,6 +18,7 @@ use self::UpdatePattern::*;
use crate::*;
use ssz::{Decode, Encode};
use typenum::Unsigned;
+use types::historical_summary::HistoricalSummary;

/// Description of how a `BeaconState` field is updated during state processing.
///

@@ -26,7 +27,18 @@ use typenum::Unsigned;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UpdatePattern {
    /// The value is updated once per `n` slots.
-    OncePerNSlots { n: u64 },
+    OncePerNSlots {
+        n: u64,
+        /// The slot at which the field begins to accumulate values.
+        ///
+        /// The field should not be read or written until `activation_slot` is reached, and the
+        /// activation slot should act as an offset when converting slots to vector indices.
+        activation_slot: Option<Slot>,
+        /// The slot at which the field ceases to accumulate values.
+        ///
+        /// If this is `None` then the field is continually updated.
+        deactivation_slot: Option<Slot>,
+    },
    /// The value is updated once per epoch, for the epoch `current_epoch - lag`.
    OncePerEpoch { lag: u64 },
}

@@ -98,12 +110,30 @@ pub trait Field<E: EthSpec>: Copy {
    fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) {
        // We take advantage of saturating subtraction on slots and epochs
        match Self::update_pattern(spec) {
-            OncePerNSlots { n } => {
+            OncePerNSlots {
+                n,
+                activation_slot,
+                deactivation_slot,
+            } => {
                // Per-slot changes exclude the index for the current slot, because
                // it won't be set until the slot completes (think of `state_roots`, `block_roots`).
                // This also works for the `historical_roots` because at the `n`th slot, the 0th
                // entry of the list is created, and before that the list is empty.
-                let end_vindex = current_slot / n;
+                //
+                // To account for the switch from historical roots to historical summaries at
+                // Capella we also modify the current slot by the activation and deactivation slots.
+                // The activation slot acts as an offset (subtraction) while the deactivation slot
+                // acts as a clamp (min).
+                let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| {
+                    std::cmp::min(current_slot, deactivation_slot)
+                });
+                let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot {
+                    slot_with_clamp - activation_slot
+                } else {
+                    // Return (0, 0) to indicate that the field should not be read/written.
+                    return (0, 0);
+                };
+                let end_vindex = slot_with_clamp_and_offset / n;
                let start_vindex = end_vindex - Self::Length::to_u64();
                (start_vindex.as_usize(), end_vindex.as_usize())
            }
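
Note: the activation/deactivation arithmetic above is easiest to check with concrete numbers. A plain-integer sketch of the end-index computation (slot values are illustrative; the real code returns (0, 0) when no activation slot is set, which the sketch models by returning 0):

// Plain-integer sketch of the vindex computation; slots are illustrative.
fn end_vindex(
    current_slot: u64,
    n: u64,
    activation_slot: Option<u64>,
    deactivation_slot: Option<u64>,
) -> u64 {
    // Deactivation clamps: once the field stops accumulating, its index freezes.
    let clamped = deactivation_slot.map_or(current_slot, |d| current_slot.min(d));
    // Activation offsets: indices count from the activation slot.
    match activation_slot {
        Some(a) => clamped.saturating_sub(a) / n,
        None => 0, // field not in use; real code bails out with (0, 0)
    }
}

fn main() {
    // Active from genesis, never deactivated (e.g. block_roots, n = 1).
    assert_eq!(end_vindex(100, 1, Some(0), None), 100);
    // Deactivated at slot 64 (e.g. historical_roots after the Capella fork slot).
    assert_eq!(end_vindex(100, 8, Some(0), Some(64)), 8);
    // Activated at slot 64 (e.g. historical_summaries from the Capella fork slot).
    assert_eq!(end_vindex(100, 8, Some(64), None), 4);
}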
@ -295,7 +325,11 @@ field!(
|
|||||||
Hash256,
|
Hash256,
|
||||||
T::SlotsPerHistoricalRoot,
|
T::SlotsPerHistoricalRoot,
|
||||||
DBColumn::BeaconBlockRoots,
|
DBColumn::BeaconBlockRoots,
|
||||||
|_| OncePerNSlots { n: 1 },
|
|_| OncePerNSlots {
|
||||||
|
n: 1,
|
||||||
|
activation_slot: Some(Slot::new(0)),
|
||||||
|
deactivation_slot: None
|
||||||
|
},
|
||||||
|state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index)
|
|state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index)
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -305,7 +339,11 @@ field!(
|
|||||||
Hash256,
|
Hash256,
|
||||||
T::SlotsPerHistoricalRoot,
|
T::SlotsPerHistoricalRoot,
|
||||||
DBColumn::BeaconStateRoots,
|
DBColumn::BeaconStateRoots,
|
||||||
|_| OncePerNSlots { n: 1 },
|
|_| OncePerNSlots {
|
||||||
|
n: 1,
|
||||||
|
activation_slot: Some(Slot::new(0)),
|
||||||
|
deactivation_slot: None,
|
||||||
|
},
|
||||||
|state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index)
|
|state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index)
|
||||||
);
|
);
|
||||||
|
|
||||||
@ -315,8 +353,12 @@ field!(
|
|||||||
Hash256,
|
Hash256,
|
||||||
T::HistoricalRootsLimit,
|
T::HistoricalRootsLimit,
|
||||||
DBColumn::BeaconHistoricalRoots,
|
DBColumn::BeaconHistoricalRoots,
|
||||||
|_| OncePerNSlots {
|
|spec: &ChainSpec| OncePerNSlots {
|
||||||
n: T::SlotsPerHistoricalRoot::to_u64()
|
n: T::SlotsPerHistoricalRoot::to_u64(),
|
||||||
|
activation_slot: Some(Slot::new(0)),
|
||||||
|
deactivation_slot: spec
|
||||||
|
.capella_fork_epoch
|
||||||
|
.map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())),
|
||||||
},
|
},
|
||||||
|state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index)
|
|state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index)
|
||||||
);
|
);
|
||||||
@ -331,6 +373,27 @@ field!(
|
|||||||
|state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index)
|
|state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index)
|
||||||
);
|
);
|
||||||
|
|
||||||
|
field!(
|
||||||
|
HistoricalSummaries,
|
||||||
|
VariableLengthField,
|
||||||
|
HistoricalSummary,
|
||||||
|
T::HistoricalRootsLimit,
|
||||||
|
DBColumn::BeaconHistoricalSummaries,
|
||||||
|
|spec: &ChainSpec| OncePerNSlots {
|
||||||
|
n: T::SlotsPerHistoricalRoot::to_u64(),
|
||||||
|
activation_slot: spec
|
||||||
|
.capella_fork_epoch
|
||||||
|
.map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())),
|
||||||
|
deactivation_slot: None,
|
||||||
|
},
|
||||||
|
|state: &BeaconState<_>, index, _| safe_modulo_index(
|
||||||
|
state
|
||||||
|
.historical_summaries()
|
||||||
|
.map_err(|_| ChunkError::InvalidFork)?,
|
||||||
|
index
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
|
pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
|
||||||
field: F,
|
field: F,
|
||||||
store: &S,
|
store: &S,
|
||||||
@ -679,6 +742,7 @@ pub enum ChunkError {
|
|||||||
end_vindex: usize,
|
end_vindex: usize,
|
||||||
length: usize,
|
length: usize,
|
||||||
},
|
},
|
||||||
|
InvalidFork,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@ -801,7 +865,7 @@ mod test {
|
|||||||
|
|
||||||
fn needs_genesis_value_test_randao<F: Field<TestSpec>>(_: F) {
|
fn needs_genesis_value_test_randao<F: Field<TestSpec>>(_: F) {
|
||||||
let spec = &TestSpec::default_spec();
|
let spec = &TestSpec::default_spec();
|
||||||
let max = TestSpec::slots_per_epoch() as u64 * (F::Length::to_u64() - 1);
|
let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1);
|
||||||
for i in 0..max {
|
for i in 0..max {
|
||||||
assert!(
|
assert!(
|
||||||
F::slot_needs_genesis_value(Slot::new(i), spec),
|
F::slot_needs_genesis_value(Slot::new(i), spec),
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
use crate::chunked_vector::{
|
use crate::chunked_vector::{
|
||||||
store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots,
|
store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots,
|
||||||
};
|
};
|
||||||
use crate::config::{
|
use crate::config::{
|
||||||
OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT,
|
OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT,
|
||||||
@ -952,6 +952,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
|||||||
store_updated_vector(StateRoots, db, state, &self.spec, ops)?;
|
store_updated_vector(StateRoots, db, state, &self.spec, ops)?;
|
||||||
store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?;
|
store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?;
|
||||||
store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?;
|
store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?;
|
||||||
|
store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?;
|
||||||
|
|
||||||
// 3. Store restore point.
|
// 3. Store restore point.
|
||||||
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point;
|
let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point;
|
||||||
@ -1006,6 +1007,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
|||||||
partial_state.load_state_roots(&self.cold_db, &self.spec)?;
|
partial_state.load_state_roots(&self.cold_db, &self.spec)?;
|
||||||
partial_state.load_historical_roots(&self.cold_db, &self.spec)?;
|
partial_state.load_historical_roots(&self.cold_db, &self.spec)?;
|
||||||
partial_state.load_randao_mixes(&self.cold_db, &self.spec)?;
|
partial_state.load_randao_mixes(&self.cold_db, &self.spec)?;
|
||||||
|
partial_state.load_historical_summaries(&self.cold_db, &self.spec)?;
|
||||||
|
|
||||||
partial_state.try_into()
|
partial_state.try_into()
|
||||||
}
|
}
|
||||||
|
@ -215,6 +215,8 @@ pub enum DBColumn {
|
|||||||
/// For Optimistically Imported Merge Transition Blocks
|
/// For Optimistically Imported Merge Transition Blocks
|
||||||
#[strum(serialize = "otb")]
|
#[strum(serialize = "otb")]
|
||||||
OptimisticTransitionBlock,
|
OptimisticTransitionBlock,
|
||||||
|
#[strum(serialize = "bhs")]
|
||||||
|
BeaconHistoricalSummaries,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A block from the database, which might have an execution payload or not.
|
/// A block from the database, which might have an execution payload or not.
|
||||||
|
@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
|
|||||||
use ssz_derive::{Decode, Encode};
|
use ssz_derive::{Decode, Encode};
|
||||||
use types::{Checkpoint, Hash256, Slot};
|
use types::{Checkpoint, Hash256, Slot};
|
||||||
|
|
||||||
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13);
|
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14);
|
||||||
|
|
||||||
// All the keys that get stored under the `BeaconMeta` column.
|
// All the keys that get stored under the `BeaconMeta` column.
|
||||||
//
|
//
|
||||||
|
@@ -1,12 +1,13 @@
 use crate::chunked_vector::{
-    load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
-    StateRoots,
+    load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots,
+    HistoricalSummaries, RandaoMixes, StateRoots,
 };
 use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp};
 use ssz::{Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
 use std::convert::TryInto;
 use std::sync::Arc;
+use types::historical_summary::HistoricalSummary;
 use types::superstruct;
 use types::*;
@@ -104,16 +105,20 @@ where
     )]
     pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844<T>,

-    // Withdrawals
+    // Capella
     #[superstruct(only(Capella, Eip4844))]
     pub next_withdrawal_index: u64,
     #[superstruct(only(Capella, Eip4844))]
     pub next_withdrawal_validator_index: u64,

+    #[ssz(skip_serializing, skip_deserializing)]
+    #[superstruct(only(Capella, Eip4844))]
+    pub historical_summaries: Option<VariableList<HistoricalSummary, T::HistoricalRootsLimit>>,
 }

 /// Implement the conversion function from BeaconState -> PartialBeaconState.
 macro_rules! impl_from_state_forgetful {
-    ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
+    ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => {
         PartialBeaconState::$variant_name($struct_name {
             // Versioning
             genesis_time: $s.genesis_time,
@@ -154,6 +159,11 @@ macro_rules! impl_from_state_forgetful {
             // Variant-specific fields
             $(
                 $extra_fields: $s.$extra_fields.clone()
+            ),*,
+
+            // Variant-specific optional
+            $(
+                $extra_fields_opt: None
             ),*
         })
     }
@@ -168,7 +178,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                 outer,
                 Base,
                 PartialBeaconStateBase,
-                [previous_epoch_attestations, current_epoch_attestations]
+                [previous_epoch_attestations, current_epoch_attestations],
+                []
             ),
             BeaconState::Altair(s) => impl_from_state_forgetful!(
                 s,
@@ -181,7 +192,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                     current_sync_committee,
                     next_sync_committee,
                     inactivity_scores
-                ]
+                ],
+                []
             ),
             BeaconState::Merge(s) => impl_from_state_forgetful!(
                 s,
@@ -195,7 +207,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                     next_sync_committee,
                     inactivity_scores,
                     latest_execution_payload_header
-                ]
+                ],
+                []
             ),
             BeaconState::Capella(s) => impl_from_state_forgetful!(
                 s,
@@ -211,7 +224,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                     latest_execution_payload_header,
                     next_withdrawal_index,
                     next_withdrawal_validator_index
-                ]
+                ],
+                [historical_summaries]
             ),
             BeaconState::Eip4844(s) => impl_from_state_forgetful!(
                 s,
@@ -227,7 +241,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                     latest_execution_payload_header,
                     next_withdrawal_index,
                     next_withdrawal_validator_index
-                ]
+                ],
+                [historical_summaries]
             ),
         }
     }
@@ -303,6 +318,23 @@ impl<T: EthSpec> PartialBeaconState<T> {
         Ok(())
     }

+    pub fn load_historical_summaries<S: KeyValueStore<T>>(
+        &mut self,
+        store: &S,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        let slot = self.slot();
+        if let Ok(historical_summaries) = self.historical_summaries_mut() {
+            if historical_summaries.is_none() {
+                *historical_summaries =
+                    Some(load_variable_list_from_db::<HistoricalSummaries, T, _>(
+                        store, slot, spec,
+                    )?);
+            }
+        }
+        Ok(())
+    }
+
     pub fn load_randao_mixes<S: KeyValueStore<T>>(
         &mut self,
         store: &S,
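Review note: like the other on-demand loaders, this fills the SSZ-skipped `Option` field exactly once. A hedged sketch of the cold-store restore path that exercises it, assuming the crate context of this file; `load_block_roots` is an assumed sibling loader, as only the other four appear in this diff:

```rust
// Sketch of restoring a full BeaconState from a frozen PartialBeaconState.
fn restore_state<T: EthSpec, S: KeyValueStore<T>>(
    mut partial: PartialBeaconState<T>,
    cold_db: &S,
    spec: &ChainSpec,
) -> Result<BeaconState<T>, Error> {
    partial.load_block_roots(cold_db, spec)?; // assumed sibling loader
    partial.load_state_roots(cold_db, spec)?;
    partial.load_historical_roots(cold_db, spec)?;
    partial.load_randao_mixes(cold_db, spec)?;
    partial.load_historical_summaries(cold_db, spec)?; // new in this commit
    // Any field still `None` surfaces as an error during the conversion.
    partial.try_into()
}
```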
@@ -326,7 +358,7 @@ impl<T: EthSpec> PartialBeaconState<T> {

 /// Implement the conversion from PartialBeaconState -> BeaconState.
 macro_rules! impl_try_into_beacon_state {
-    ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
+    ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => {
         BeaconState::$variant_name($struct_name {
             // Versioning
             genesis_time: $inner.genesis_time,
@@ -371,6 +403,11 @@ macro_rules! impl_try_into_beacon_state {
             // Variant-specific fields
             $(
                 $extra_fields: $inner.$extra_fields
+            ),*,
+
+            // Variant-specific optional fields
+            $(
+                $extra_opt_fields: unpack_field($inner.$extra_opt_fields)?
             ),*
         })
     }
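Review note: `unpack_field` turns the `Option` back into a hard error when a loader was skipped before conversion. Its definition is not part of this hunk; a minimal sketch of what it must look like, with the error variant being an assumption:

```rust
// Sketch only: the real helper lives elsewhere in this file.
fn unpack_field<T>(x: Option<T>) -> Result<T, Error> {
    x.ok_or(Error::PartialBeaconStateError) // assumed store::Error variant
}
```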
@@ -389,7 +426,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                 inner,
                 Base,
                 BeaconStateBase,
-                [previous_epoch_attestations, current_epoch_attestations]
+                [previous_epoch_attestations, current_epoch_attestations],
+                []
             ),
             PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!(
                 inner,
@@ -401,7 +439,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                     current_sync_committee,
                     next_sync_committee,
                     inactivity_scores
-                ]
+                ],
+                []
             ),
             PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!(
                 inner,
@@ -414,7 +453,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                     next_sync_committee,
                     inactivity_scores,
                     latest_execution_payload_header
-                ]
+                ],
+                []
             ),
             PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!(
                 inner,
@@ -429,7 +469,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                     latest_execution_payload_header,
                     next_withdrawal_index,
                     next_withdrawal_validator_index
-                ]
+                ],
+                [historical_summaries]
             ),
             PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!(
                 inner,
@@ -444,7 +485,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                     latest_execution_payload_header,
                     next_withdrawal_index,
                     next_withdrawal_validator_index
-                ]
+                ],
+                [historical_summaries]
             ),
         };
         Ok(state)
@@ -12,10 +12,10 @@ This number can be much higher depending on how many other validators are queued.

 ## Withdrawal of exited funds

-Even though users can perform a voluntary exit in phase 0, they **cannot withdraw their exited funds at this point in time**.
-This implies that the staked funds are effectively **frozen** until withdrawals are enabled in future phases.
+Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**.
+This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella).

-To understand the phased rollout strategy for Ethereum upgrades, please visit <https://ethereum.org/en/upgrades/#roadmap>.
+To understand the rollout strategy for Ethereum upgrades, please visit <https://ethereum.org/en/upgrades>.
@@ -189,7 +189,7 @@ impl ValidatorDefinitions {
             .write(true)
             .read(true)
             .create_new(false)
-            .open(&config_path)
+            .open(config_path)
             .map_err(Error::UnableToOpenFile)?;
         serde_yaml::from_reader(file).map_err(Error::UnableToParseFile)
     }
@@ -35,4 +35,3 @@ procinfo = { version = "0.4.2", optional = true }
 [features]
 default = ["lighthouse"]
 lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
-withdrawals-processing = ["store/withdrawals-processing"]
@@ -628,27 +628,6 @@ impl BeaconNodeHttpClient {
         Ok(())
     }

-    /// `POST beacon/blobs`
-    ///
-    /// Returns `Ok(None)` on a 404 error.
-    pub async fn post_beacon_blobs<T: EthSpec>(
-        &self,
-        block: &BlobsSidecar<T>,
-    ) -> Result<(), Error> {
-        let mut path = self.eth_path(V1)?;
-
-        path.path_segments_mut()
-            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
-            .push("beacon")
-            .push("blobs");
-
-        //FIXME(sean) should we re-use the proposal timeout? seems reasonable to..
-        self.post_with_timeout(path, block, self.timeouts.proposal)
-            .await?;
-
-        Ok(())
-    }
-
     /// `POST beacon/blinded_blocks`
     ///
     /// Returns `Ok(None)` on a 404 error.
@@ -196,7 +196,7 @@ impl<'a> Builder<'a> {
         if path.exists() {
             return Err(Error::DepositDataAlreadyExists(path));
         } else {
-            let hex = format!("0x{}", hex::encode(&deposit_data));
+            let hex = format!("0x{}", hex::encode(deposit_data));
             File::options()
                 .write(true)
                 .read(true)
@@ -63,15 +63,15 @@ mod test {
     #[test]
     fn encoding() {
         let bytes = vec![0, 255];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x00ff");

         let bytes = vec![];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x");

         let bytes = vec![1, 2, 3];
-        let hex = encode(&bytes);
+        let hex = encode(bytes);
         assert_eq!(hex.as_str(), "0x010203");
     }
 }
@@ -36,7 +36,7 @@ impl<'de> Visitor<'de> for QuantityVisitor {
         } else if stripped.starts_with('0') {
             Err(de::Error::custom("cannot have leading zero"))
         } else if stripped.len() % 2 != 0 {
-            hex::decode(&format!("0{}", stripped))
+            hex::decode(format!("0{}", stripped))
                 .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
         } else {
             hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
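Review note: the odd-length branch exists because `hex::decode` rejects inputs with an odd number of digits, so a single `0` is prefixed before decoding. A quick standalone check using the `hex` crate:

```rust
fn main() {
    // "0x1" reaches this branch as the single digit "1".
    assert!(hex::decode("1").is_err()); // FromHexError::OddLength
    assert_eq!(hex::decode(format!("0{}", "1")).unwrap(), vec![0x01]);
}
```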
@@ -19,7 +19,6 @@ pub use process_operations::process_operations;
 pub use verify_attestation::{
     verify_attestation_for_block_inclusion, verify_attestation_for_state,
 };
-#[cfg(feature = "withdrawals-processing")]
 pub use verify_bls_to_execution_change::verify_bls_to_execution_change;
 pub use verify_deposit::{
     get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature,
@@ -36,12 +35,13 @@ pub mod signature_sets;
 pub mod tests;
 mod verify_attestation;
 mod verify_attester_slashing;
-#[cfg(feature = "withdrawals-processing")]
 mod verify_bls_to_execution_change;
 mod verify_deposit;
 mod verify_exit;
 mod verify_proposer_slashing;

+use crate::common::decrease_balance;
+
 #[cfg(feature = "arbitrary-fuzz")]
 use arbitrary::Arbitrary;
@@ -162,7 +162,6 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
     // previous block.
     if is_execution_enabled(state, block.body()) {
         let payload = block.body().execution_payload()?;
-        #[cfg(feature = "withdrawals-processing")]
         process_withdrawals::<T, Payload>(state, payload, spec)?;
         process_execution_payload::<T, Payload>(state, payload, spec)?;
     }
@@ -524,12 +523,14 @@ pub fn get_expected_withdrawals<T: EthSpec>(
 }

 /// Apply withdrawals to the state.
-#[cfg(feature = "withdrawals-processing")]
 pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
     state: &mut BeaconState<T>,
     payload: Payload::Ref<'payload>,
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
+    if cfg!(not(feature = "withdrawals-processing")) {
+        return Ok(());
+    }
     match state {
         BeaconState::Merge(_) => Ok(()),
         BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
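Review note: this swaps conditional compilation for a compile-time constant branch, so the function (and its now-unguarded call site above) exists in every build and only its body is gated. A standalone sketch of the difference:

```rust
// With #[cfg], the item vanishes from the build entirely, so every caller
// must carry the same attribute or the crate fails to compile:
#[cfg(feature = "withdrawals-processing")]
fn gated_item() {}

// With cfg!(), the function always exists; the condition is an ordinary
// bool that the optimiser folds away, and callers need no gating:
fn always_present() {
    if cfg!(not(feature = "withdrawals-processing")) {
        return; // no-op in builds without the feature
    }
    // feature-specific work would go here
}

fn main() {
    always_present();
}
```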
@@ -52,10 +52,10 @@ pub fn process_sync_aggregate<T: EthSpec>(
         .zip(aggregate.sync_committee_bits.iter())
     {
         if participation_bit {
-            increase_balance(state, participant_index as usize, participant_reward)?;
+            increase_balance(state, participant_index, participant_reward)?;
             increase_balance(state, proposer_index as usize, proposer_reward)?;
         } else {
-            decrease_balance(state, participant_index as usize, participant_reward)?;
+            decrease_balance(state, participant_index, participant_reward)?;
         }
     }
@@ -109,7 +109,7 @@ fn tx_peek_blob_versioned_hashes<T: EthSpec>(
             .get(next_version_hash_index..next_version_hash_index.safe_add(32)?)
             .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds {
                 length: tx_len,
-                index: (next_version_hash_index as usize).safe_add(32)?,
+                index: (next_version_hash_index).safe_add(32)?,
             })?;
         Ok(VersionedHash::from_slice(bytes))
     }))
@@ -34,7 +34,6 @@ pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload<T>>(
     process_deposits(state, block_body.deposits(), spec)?;
     process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?;

-    #[cfg(feature = "withdrawals-processing")]
     if let Ok(bls_to_execution_changes) = block_body.bls_to_execution_changes() {
         process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?;
     }
@@ -295,13 +294,15 @@ pub fn process_exits<T: EthSpec>(
 ///
 /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
-#[cfg(feature = "withdrawals-processing")]
 pub fn process_bls_to_execution_changes<T: EthSpec>(
     state: &mut BeaconState<T>,
     bls_to_execution_changes: &[SignedBlsToExecutionChange],
     verify_signatures: VerifySignatures,
     spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
+    if cfg!(not(feature = "withdrawals-processing")) {
+        return Ok(());
+    }
     for (i, signed_address_change) in bls_to_execution_changes.iter().enumerate() {
         verify_bls_to_execution_change(state, signed_address_change, verify_signatures, spec)
             .map_err(|e| e.into_with_index(i))?;
@@ -3,14 +3,16 @@
 pub use epoch_processing_summary::EpochProcessingSummary;
 use errors::EpochProcessingError as Error;
 pub use justification_and_finalization_state::JustificationAndFinalizationState;
-pub use registry_updates::process_registry_updates;
 use safe_arith::SafeArith;
-pub use slashings::process_slashings;
 use types::{BeaconState, ChainSpec, EthSpec};

+pub use registry_updates::process_registry_updates;
+pub use slashings::process_slashings;
 pub use weigh_justification_and_finalization::weigh_justification_and_finalization;

 pub mod altair;
 pub mod base;
+pub mod capella;
 pub mod effective_balance_updates;
 pub mod epoch_processing_summary;
 pub mod errors;
@@ -37,10 +39,8 @@ pub fn process_epoch<T: EthSpec>(
     match state {
         BeaconState::Base(_) => base::process_epoch(state, spec),
-        BeaconState::Altair(_)
-        | BeaconState::Merge(_)
-        | BeaconState::Capella(_)
-        | BeaconState::Eip4844(_) => altair::process_epoch(state, spec),
+        BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec),
+        BeaconState::Capella(_) | BeaconState::Eip4844(_) => capella::process_epoch(state, spec),
     }
 }
@@ -76,7 +76,7 @@ pub fn get_flag_index_deltas<T: EthSpec>(
         let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?;
         let mut delta = Delta::default();

-        if unslashed_participating_indices.contains(index as usize)? {
+        if unslashed_participating_indices.contains(index)? {
             if !state.is_in_inactivity_leak(previous_epoch, spec) {
                 let reward_numerator = base_reward
                     .safe_mul(weight)?
@@ -89,8 +89,8 @@ pub fn get_flag_index_deltas<T: EthSpec>(
             delta.penalize(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)?)?;
         }
         deltas
-            .get_mut(index as usize)
-            .ok_or(Error::DeltaOutOfBounds(index as usize))?
+            .get_mut(index)
+            .ok_or(Error::DeltaOutOfBounds(index))?
             .combine(delta)?;
     }
     Ok(())
@@ -235,7 +235,7 @@ fn get_inclusion_delay_delta(
         let max_attester_reward = base_reward.safe_sub(proposer_reward)?;
         delta.reward(max_attester_reward.safe_div(inclusion_info.delay)?)?;

-        let proposer_index = inclusion_info.proposer_index as usize;
+        let proposer_index = inclusion_info.proposer_index;
         Ok((delta, Some((proposer_index, proposer_delta))))
     } else {
         Ok((Delta::default(), None))
@@ -0,0 +1,78 @@
+use super::altair::inactivity_updates::process_inactivity_updates;
+use super::altair::justification_and_finalization::process_justification_and_finalization;
+use super::altair::participation_cache::ParticipationCache;
+use super::altair::participation_flag_updates::process_participation_flag_updates;
+use super::altair::rewards_and_penalties::process_rewards_and_penalties;
+use super::altair::sync_committee_updates::process_sync_committee_updates;
+use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error};
+use crate::per_epoch_processing::{
+    effective_balance_updates::process_effective_balance_updates,
+    resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset},
+};
+use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch};
+
+pub use historical_summaries_update::process_historical_summaries_update;
+
+mod historical_summaries_update;
+
+pub fn process_epoch<T: EthSpec>(
+    state: &mut BeaconState<T>,
+    spec: &ChainSpec,
+) -> Result<EpochProcessingSummary<T>, Error> {
+    // Ensure the committee caches are built.
+    state.build_committee_cache(RelativeEpoch::Previous, spec)?;
+    state.build_committee_cache(RelativeEpoch::Current, spec)?;
+    state.build_committee_cache(RelativeEpoch::Next, spec)?;
+
+    // Pre-compute participating indices and total balances.
+    let participation_cache = ParticipationCache::new(state, spec)?;
+    let sync_committee = state.current_sync_committee()?.clone();
+
+    // Justification and finalization.
+    let justification_and_finalization_state =
+        process_justification_and_finalization(state, &participation_cache)?;
+    justification_and_finalization_state.apply_changes_to_state(state);
+
+    process_inactivity_updates(state, &participation_cache, spec)?;
+
+    // Rewards and Penalties.
+    process_rewards_and_penalties(state, &participation_cache, spec)?;
+
+    // Registry Updates.
+    process_registry_updates(state, spec)?;
+
+    // Slashings.
+    process_slashings(
+        state,
+        participation_cache.current_epoch_total_active_balance(),
+        spec,
+    )?;
+
+    // Reset eth1 data votes.
+    process_eth1_data_reset(state)?;
+
+    // Update effective balances with hysteresis (lag).
+    process_effective_balance_updates(state, spec)?;
+
+    // Reset slashings
+    process_slashings_reset(state)?;
+
+    // Set randao mix
+    process_randao_mixes_reset(state)?;
+
+    // Set historical summaries accumulator
+    process_historical_summaries_update(state)?;
+
+    // Rotate current/previous epoch participation
+    process_participation_flag_updates(state)?;
+
+    process_sync_committee_updates(state, spec)?;
+
+    // Rotate the epoch caches to suit the epoch transition.
+    state.advance_caches(spec)?;
+
+    Ok(EpochProcessingSummary::Altair {
+        participation_cache,
+        sync_committee,
+    })
+}
@@ -0,0 +1,23 @@
+use crate::EpochProcessingError;
+use safe_arith::SafeArith;
+use types::historical_summary::HistoricalSummary;
+use types::{BeaconState, EthSpec};
+
+pub fn process_historical_summaries_update<T: EthSpec>(
+    state: &mut BeaconState<T>,
+) -> Result<(), EpochProcessingError> {
+    // Set historical block root accumulator.
+    let next_epoch = state.next_epoch()?;
+    if next_epoch
+        .as_u64()
+        .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)?
+        == 0
+    {
+        let summary = HistoricalSummary::new(state);
+        return state
+            .historical_summaries_mut()?
+            .push(summary)
+            .map_err(Into::into);
+    }
+    Ok(())
+}
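Review note: with the mainnet preset this pushes one summary every 256 epochs. A standalone check of the arithmetic; the preset constants below are assumptions about mainnet, not values taken from this diff:

```rust
const SLOTS_PER_HISTORICAL_ROOT: u64 = 8192; // assumed mainnet preset
const SLOTS_PER_EPOCH: u64 = 32;             // assumed mainnet preset

fn main() {
    let period = SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH;
    assert_eq!(period, 256); // one HistoricalSummary per 256 epochs
    // The push fires when the *next* epoch is a multiple of the period:
    assert!(512 % period == 0);
    assert!(513 % period != 0);
}
```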
@@ -1,3 +1,4 @@
+use ssz_types::VariableList;
 use std::mem;
 use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork};

@@ -55,9 +56,10 @@ pub fn upgrade_to_capella<E: EthSpec>(
         next_sync_committee: pre.next_sync_committee.clone(),
         // Execution
         latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_capella(),
-        // Withdrawals
+        // Capella
         next_withdrawal_index: 0,
         next_withdrawal_validator_index: 0,
+        historical_summaries: VariableList::default(),
         // Caches
         total_active_balance: pre.total_active_balance,
         committee_caches: mem::take(&mut pre.committee_caches),
@@ -57,9 +57,10 @@ pub fn upgrade_to_eip4844<E: EthSpec>(
         next_sync_committee: pre.next_sync_committee.clone(),
         // Execution
         latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_eip4844(),
-        // Withdrawals
+        // Capella
         next_withdrawal_index: pre.next_withdrawal_index,
         next_withdrawal_validator_index: pre.next_withdrawal_validator_index,
+        historical_summaries: pre.historical_summaries.clone(),
         // Caches
         total_active_balance: pre.total_active_balance,
         committee_caches: mem::take(&mut pre.committee_caches),
@@ -1,8 +1,10 @@
 use crate::per_block_processing::{
     errors::{
-        AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError,
+        AttesterSlashingValidationError, BlsExecutionChangeValidationError, ExitValidationError,
+        ProposerSlashingValidationError,
     },
-    verify_attester_slashing, verify_exit, verify_proposer_slashing,
+    verify_attester_slashing, verify_bls_to_execution_change, verify_exit,
+    verify_proposer_slashing,
 };
 use crate::VerifySignatures;
 use derivative::Derivative;
@@ -12,15 +14,7 @@ use ssz_derive::{Decode, Encode};
 use std::marker::PhantomData;
 use types::{
     AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing,
-    SignedVoluntaryExit,
-};
-
-#[cfg(feature = "withdrawals-processing")]
-use {
-    crate::per_block_processing::{
-        errors::BlsExecutionChangeValidationError, verify_bls_to_execution_change,
-    },
-    types::SignedBlsToExecutionChange,
+    SignedBlsToExecutionChange, SignedVoluntaryExit,
 };

 const MAX_FORKS_VERIFIED_AGAINST: usize = 2;
@@ -202,7 +196,6 @@ impl<E: EthSpec> VerifyOperation<E> for ProposerSlashing {
     }
 }

-#[cfg(feature = "withdrawals-processing")]
 impl<E: EthSpec> VerifyOperation<E> for SignedBlsToExecutionChange {
     type Error = BlsExecutionChangeValidationError;
@@ -14,6 +14,7 @@ use ssz::{ssz_encode, Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
 use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
 use std::convert::TryInto;
+use std::hash::Hash;
 use std::{fmt, mem, sync::Arc};
 use superstruct::superstruct;
 use swap_or_not_shuffle::compute_shuffled_index;
@@ -25,6 +26,7 @@ pub use self::committee_cache::{
     compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
     CommitteeCache,
 };
+use crate::historical_summary::HistoricalSummary;
 pub use clone_config::CloneConfig;
 pub use eth_spec::*;
 pub use iter::BlockRootsIter;
@@ -223,6 +225,7 @@ where
     pub block_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
     #[compare_fields(as_slice)]
     pub state_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
+    // Frozen in Capella, replaced by historical_summaries
     pub historical_roots: VariableList<Hash256, T::HistoricalRootsLimit>,

     // Ethereum 1.0 chain data
@@ -296,11 +299,14 @@ where
     )]
     pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844<T>,

-    // Withdrawals
+    // Capella
     #[superstruct(only(Capella, Eip4844), partial_getter(copy))]
     pub next_withdrawal_index: u64,
     #[superstruct(only(Capella, Eip4844), partial_getter(copy))]
     pub next_withdrawal_validator_index: u64,
+    // Deep history valid from Capella onwards.
+    #[superstruct(only(Capella, Eip4844))]
+    pub historical_summaries: VariableList<HistoricalSummary, T::HistoricalRootsLimit>,

     // Caching (not in the spec)
     #[serde(skip_serializing, skip_deserializing)]
@@ -504,7 +510,7 @@ impl<T: EthSpec> BeaconState<T> {
     /// Spec v0.12.1
     pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result<u64, Error> {
         let cache = self.committee_cache_at_slot(slot)?;
-        Ok(cache.committees_per_slot() as u64)
+        Ok(cache.committees_per_slot())
     }

     /// Compute the number of committees in an entire epoch.
@@ -144,7 +144,7 @@ impl CommitteeCache {
             self.committees_per_slot as usize,
             index as usize,
         );
-        let committee = self.compute_committee(committee_index as usize)?;
+        let committee = self.compute_committee(committee_index)?;

         Some(BeaconCommittee {
             slot,
@@ -344,12 +344,7 @@ mod committees {
         let cache_epoch = cache_epoch.into_epoch(state_epoch);

-        execute_committee_consistency_test(
-            new_head_state,
-            cache_epoch,
-            validator_count as usize,
-            spec,
-        );
+        execute_committee_consistency_test(new_head_state, cache_epoch, validator_count, spec);
     }

     async fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) {
@@ -361,18 +356,13 @@ mod committees {
             .mul(spec.target_committee_size)
             .add(1);

-        committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch)
-            .await;
-
-        committee_consistency_test::<T>(
-            validator_count as usize,
-            T::genesis_epoch() + 4,
-            cached_epoch,
-        )
-        .await;
-
-        committee_consistency_test::<T>(
-            validator_count as usize,
+        committee_consistency_test::<T>(validator_count, Epoch::new(0), cached_epoch).await;
+
+        committee_consistency_test::<T>(validator_count, T::genesis_epoch() + 4, cached_epoch)
+            .await;
+
+        committee_consistency_test::<T>(
+            validator_count,
             T::genesis_epoch()
                 + (T::slots_per_historical_root() as u64)
                     .mul(T::slots_per_epoch())
@@ -3,6 +3,7 @@
 #![allow(clippy::indexing_slicing)]

 use super::Error;
+use crate::historical_summary::HistoricalSummaryCache;
 use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator};
 use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
 use rayon::prelude::*;
@@ -142,6 +143,7 @@ pub struct BeaconTreeHashCacheInner<T: EthSpec> {
     block_roots: TreeHashCache,
     state_roots: TreeHashCache,
     historical_roots: TreeHashCache,
+    historical_summaries: OptionalTreeHashCache,
     balances: TreeHashCache,
     randao_mixes: TreeHashCache,
     slashings: TreeHashCache,
@@ -164,6 +166,14 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
         let historical_roots = state
             .historical_roots()
             .new_tree_hash_cache(&mut fixed_arena);
+        let historical_summaries = OptionalTreeHashCache::new(
+            state
+                .historical_summaries()
+                .ok()
+                .map(HistoricalSummaryCache::new)
+                .as_ref(),
+        );

         let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena);

         let validators = ValidatorsListTreeHashCache::new::<T>(state.validators());
@@ -200,6 +210,7 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
             block_roots,
             state_roots,
             historical_roots,
+            historical_summaries,
             balances,
             randao_mixes,
             slashings,
@@ -249,6 +260,7 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
                 .slashings()
                 .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?,
         ];
+
         // Participation
         if let BeaconState::Base(state) = state {
             leaves.push(state.previous_epoch_attestations.tree_hash_root());
@@ -291,6 +303,24 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
         if let Ok(payload_header) = state.latest_execution_payload_header() {
             leaves.push(payload_header.tree_hash_root());
         }

+        // Withdrawal indices (Capella and later).
+        if let Ok(next_withdrawal_index) = state.next_withdrawal_index() {
+            leaves.push(next_withdrawal_index.tree_hash_root());
+        }
+        if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() {
+            leaves.push(next_withdrawal_validator_index.tree_hash_root());
+        }
+
+        // Historical roots/summaries (Capella and later).
+        if let Ok(historical_summaries) = state.historical_summaries() {
+            leaves.push(
+                self.historical_summaries.recalculate_tree_hash_root(
+                    &HistoricalSummaryCache::new(historical_summaries),
+                )?,
+            );
+        }
+
         Ok(leaves)
     }

@@ -335,14 +365,6 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
             hasher.write(leaf.as_bytes())?;
         }

-        // Withdrawal indices (Capella and later).
-        if let Ok(next_withdrawal_index) = state.next_withdrawal_index() {
-            hasher.write(next_withdrawal_index.tree_hash_root().as_bytes())?;
-        }
-        if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() {
-            hasher.write(next_withdrawal_validator_index.tree_hash_root().as_bytes())?;
-        }
-
         let root = hasher.finish()?;

         self.previous_state = Some((root, state.slot()));
@@ -253,11 +253,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
     fn max_blobs_per_block() -> usize {
         Self::MaxBlobsPerBlock::to_usize()
     }

-    /// Returns the `BYTES_PER_BLOB` constant for the specification.
-    fn bytes_per_blob() -> usize {
-        Self::BytesPerBlob::to_usize()
-    }
 }

 /// Macro to inherit some type values from another EthSpec.
consensus/types/src/historical_summary.rs (new file, 88 lines)
@@ -0,0 +1,88 @@
+use crate::test_utils::TestRandom;
+use crate::Unsigned;
+use crate::{BeaconState, EthSpec, Hash256};
+use cached_tree_hash::Error;
+use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
+use compare_fields_derive::CompareFields;
+use serde_derive::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+use ssz_types::VariableList;
+use test_random_derive::TestRandom;
+use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK};
+use tree_hash_derive::TreeHash;
+
+/// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch`
+/// making the two hash_tree_root-compatible. This struct is introduced into the beacon state
+/// in the Capella hard fork.
+///
+/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#historicalsummary
+#[derive(
+    Debug,
+    PartialEq,
+    Serialize,
+    Deserialize,
+    Encode,
+    Decode,
+    TreeHash,
+    TestRandom,
+    CompareFields,
+    Clone,
+    Copy,
+    Default,
+)]
+pub struct HistoricalSummary {
+    block_summary_root: Hash256,
+    state_summary_root: Hash256,
+}
+
+impl HistoricalSummary {
+    pub fn new<T: EthSpec>(state: &BeaconState<T>) -> Self {
+        Self {
+            block_summary_root: state.block_roots().tree_hash_root(),
+            state_summary_root: state.state_roots().tree_hash_root(),
+        }
+    }
+}
+
+/// Wrapper type allowing the implementation of `CachedTreeHash`.
+#[derive(Debug)]
+pub struct HistoricalSummaryCache<'a, N: Unsigned> {
+    pub inner: &'a VariableList<HistoricalSummary, N>,
+}
+
+impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> {
+    pub fn new(inner: &'a VariableList<HistoricalSummary, N>) -> Self {
+        Self { inner }
+    }
+
+    #[allow(clippy::len_without_is_empty)]
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<'a, N: Unsigned> CachedTreeHash<TreeHashCache> for HistoricalSummaryCache<'a, N> {
+    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
+        TreeHashCache::new(arena, int_log(N::to_usize()), self.len())
+    }
+
+    fn recalculate_tree_hash_root(
+        &self,
+        arena: &mut CacheArena,
+        cache: &mut TreeHashCache,
+    ) -> Result<Hash256, Error> {
+        Ok(mix_in_length(
+            &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?,
+            self.len(),
+        ))
+    }
+}
+
+pub fn leaf_iter(
+    values: &[HistoricalSummary],
+) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
+    values
+        .iter()
+        .map(|value| value.tree_hash_root())
+        .map(Hash256::to_fixed_bytes)
+}
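Review note: the doc comment's compatibility claim follows from SSZ merkleization: a container's root is computed over its fields' roots, so summarising each vector by its own root leaves the container root unchanged. A toy standalone illustration of that reduction; real SSZ uses sha256, and the mixing function below is a deliberate placeholder:

```rust
// Toy stand-in for SSZ merkleization of a 2-field container: hash the two
// field roots together. The point is structural, not cryptographic.
fn hash2(a: u64, b: u64) -> u64 {
    a.wrapping_mul(31).wrapping_add(b) // placeholder mixing function
}

fn main() {
    let block_roots_root = 11; // pretend: hash_tree_root(batch.block_roots)
    let state_roots_root = 22; // pretend: hash_tree_root(batch.state_roots)

    // HistoricalBatch { block_roots, state_roots }: the vectors enter the
    // container hash only via their own roots...
    let historical_batch_root = hash2(block_roots_root, state_roots_root);

    // ...so HistoricalSummary, which stores exactly those two roots,
    // merkleizes to the same value.
    let historical_summary_root = hash2(block_roots_root, state_roots_root);

    assert_eq!(historical_batch_root, historical_summary_root);
}
```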
@@ -49,6 +49,7 @@ pub mod fork_name;
 pub mod free_attestation;
 pub mod graffiti;
 pub mod historical_batch;
+pub mod historical_summary;
 pub mod indexed_attestation;
 pub mod light_client_bootstrap;
 pub mod light_client_finality_update;
@@ -92,7 +92,7 @@ pub trait AbstractExecPayload<T: EthSpec>:
     + From<ExecutionPayloadEip4844<T>>
     + TryFrom<ExecutionPayloadHeaderEip4844<T>>;

-    fn default_at_fork(fork_name: ForkName) -> Self;
+    fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>;
 }

 #[superstruct(
@@ -372,13 +372,12 @@ impl<T: EthSpec> AbstractExecPayload<T> for FullPayload<T> {
     type Capella = FullPayloadCapella<T>;
     type Eip4844 = FullPayloadEip4844<T>;

-    fn default_at_fork(fork_name: ForkName) -> Self {
+    fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
         match fork_name {
-            //FIXME(sean) error handling
-            ForkName::Base | ForkName::Altair => panic!(),
-            ForkName::Merge => FullPayloadMerge::default().into(),
-            ForkName::Capella => FullPayloadCapella::default().into(),
-            ForkName::Eip4844 => FullPayloadEip4844::default().into(),
+            ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
+            ForkName::Merge => Ok(FullPayloadMerge::default().into()),
+            ForkName::Capella => Ok(FullPayloadCapella::default().into()),
+            ForkName::Eip4844 => Ok(FullPayloadEip4844::default().into()),
         }
     }
 }
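Review note: returning `Result` resolves the old `//FIXME(sean)` by making the pre-Bellatrix case a recoverable error instead of a panic, so callers now choose how to surface it. A hedged usage sketch, assuming the crate context (`FullPayload`, `ForkName`, `Error`, `MainnetEthSpec`):

```rust
fn make_default_payload(fork: ForkName) -> Result<FullPayload<MainnetEthSpec>, Error> {
    // Pre-Bellatrix forks carry no execution payload, so this is now a
    // recoverable Error::IncorrectStateVariant rather than a panic!().
    FullPayload::default_at_fork(fork)
}

// make_default_payload(ForkName::Base).is_err()  -> true
// make_default_payload(ForkName::Merge).is_ok()  -> true
```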
@@ -882,13 +881,12 @@ impl<T: EthSpec> AbstractExecPayload<T> for BlindedPayload<T> {
     type Capella = BlindedPayloadCapella<T>;
     type Eip4844 = BlindedPayloadEip4844<T>;

-    fn default_at_fork(fork_name: ForkName) -> Self {
+    fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
         match fork_name {
-            //FIXME(sean) error handling
-            ForkName::Base | ForkName::Altair => panic!(),
-            ForkName::Merge => BlindedPayloadMerge::default().into(),
-            ForkName::Capella => BlindedPayloadCapella::default().into(),
-            ForkName::Eip4844 => BlindedPayloadEip4844::default().into(),
+            ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
+            ForkName::Merge => Ok(BlindedPayloadMerge::default().into()),
+            ForkName::Capella => Ok(BlindedPayloadCapella::default().into()),
+            ForkName::Eip4844 => Ok(BlindedPayloadEip4844::default().into()),
         }
     }
 }
@@ -223,7 +223,7 @@ mod test {
     }

     fn preset_from_file<T: DeserializeOwned>(preset_name: &str, filename: &str) -> T {
-        let f = File::open(&presets_base_path().join(preset_name).join(filename))
+        let f = File::open(presets_base_path().join(preset_name).join(filename))
             .expect("preset file exists");
         serde_yaml::from_reader(f).unwrap()
     }
@@ -4,7 +4,10 @@ use ssz::Encode;
 use std::fs::File;
 use std::io::Write;
 use std::time::{SystemTime, UNIX_EPOCH};
-use types::{EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge};
+use types::{
+    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderEip4844,
+    ExecutionPayloadHeaderMerge, ForkName,
+};

 pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
     let eth1_block_hash = parse_required(matches, "execution-block-hash")?;
@@ -17,17 +20,36 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
     let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?;
     let gas_limit = parse_required(matches, "gas-limit")?;
     let file_name = matches.value_of("file").ok_or("No file supplied")?;
+    let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Merge);

-    //FIXME(sean)
-    let execution_payload_header: ExecutionPayloadHeader<T> =
-        ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge {
+    let execution_payload_header: ExecutionPayloadHeader<T> = match fork_name {
+        ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()),
+        ForkName::Merge => ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge {
             gas_limit,
             base_fee_per_gas,
             timestamp: genesis_time,
             block_hash: eth1_block_hash,
             prev_randao: eth1_block_hash.into_root(),
             ..ExecutionPayloadHeaderMerge::default()
-        });
+        }),
+        ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella {
+            gas_limit,
+            base_fee_per_gas,
+            timestamp: genesis_time,
+            block_hash: eth1_block_hash,
+            prev_randao: eth1_block_hash.into_root(),
+            ..ExecutionPayloadHeaderCapella::default()
+        }),
+        ForkName::Eip4844 => ExecutionPayloadHeader::Eip4844(ExecutionPayloadHeaderEip4844 {
+            gas_limit,
+            base_fee_per_gas,
+            timestamp: genesis_time,
+            block_hash: eth1_block_hash,
+            prev_randao: eth1_block_hash.into_root(),
+            ..ExecutionPayloadHeaderEip4844::default()
+        }),
+    };

     let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?;
     let bytes = execution_payload_header.as_ssz_bytes();
     file.write_all(bytes.as_slice())
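Review suggestion (not part of the commit): the three arms fill the same five fields, so a local macro would keep them in sync if another shared field is ever added. Sketch only, reusing the locals captured above:

```rust
// Sketch: $variant is the ExecutionPayloadHeader variant, $header the
// per-fork header struct; the field names match the match arms above.
macro_rules! header_for_fork {
    ($variant:ident, $header:ident) => {
        ExecutionPayloadHeader::$variant($header {
            gas_limit,
            base_fee_per_gas,
            timestamp: genesis_time,
            block_hash: eth1_block_hash,
            prev_randao: eth1_block_hash.into_root(),
            ..$header::default()
        })
    };
}
// e.g. ForkName::Capella => header_for_fork!(Capella, ExecutionPayloadHeaderCapella),
```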
@@ -371,7 +371,8 @@ fn main() {
         .subcommand(
             SubCommand::with_name("create-payload-header")
                 .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \
-                Useful as input for `lcli new-testnet --execution-payload-header FILE`. ")
+                Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \
+                is not provided, a payload header for the `Bellatrix` fork will be created.")
                 .arg(
                     Arg::with_name("execution-block-hash")
                         .long("execution-block-hash")
@@ -417,7 +418,15 @@ fn main() {
                         .takes_value(true)
                         .required(true)
                         .help("Output file"),
-                )
+                ).arg(
+                    Arg::with_name("fork")
+                        .long("fork")
+                        .value_name("FORK")
+                        .takes_value(true)
+                        .default_value("bellatrix")
+                        .help("The fork for which the execution payload header should be created.")
+                        .possible_values(&["merge", "bellatrix", "capella", "eip4844"])
+                )
         )
         .subcommand(
             SubCommand::with_name("new-testnet")
@ -759,7 +768,6 @@ fn main() {
                         .value_name("PATH")
                         .takes_value(true)
                         .conflicts_with("beacon-url")
-                        .requires("pre-state-path")
                         .help("Path to load a SignedBeaconBlock from file as SSZ."),
                 )
                 .arg(
@ -1,7 +1,7 @@
 use clap::ArgMatches;
 use clap_utils::{parse_optional, parse_required, parse_ssz_optional};
 use eth2_hashing::hash;
-use eth2_network_config::Eth2NetworkConfig;
+use eth2_network_config::{Eth2NetworkConfig, TRUSTED_SETUP};
 use ssz::Decode;
 use ssz::Encode;
 use state_processing::process_activations;
@ -13,9 +13,10 @@ use std::str::FromStr;
 use std::time::{SystemTime, UNIX_EPOCH};
 use types::ExecutionBlockHash;
 use types::{
-    test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Eth1Data,
-    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, Hash256, Keypair, PublicKey,
-    Validator,
+    test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch,
+    Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella,
+    ExecutionPayloadHeaderEip4844, ExecutionPayloadHeaderMerge, ForkName, Hash256, Keypair,
+    PublicKey, Validator,
 };

 pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
@ -80,8 +81,13 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
         spec.capella_fork_epoch = Some(fork_epoch);
     }

+    let mut kzg_trusted_setup = None;
     if let Some(fork_epoch) = parse_optional(matches, "eip4844-fork-epoch")? {
         spec.eip4844_fork_epoch = Some(fork_epoch);
+        kzg_trusted_setup = Some(
+            serde_json::from_reader(TRUSTED_SETUP)
+                .map_err(|e| format!("Unable to read trusted setup file: {}", e))?,
+        )
     }

     if let Some(ttd) = parse_optional(matches, "ttd")? {
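Here the KZG trusted setup is parsed only when an eip4844 fork epoch is configured. `serde_json::from_reader(TRUSTED_SETUP)` works because the setup is an embedded byte slice and `&[u8]` implements `std::io::Read`. A hedged sketch of the same pattern; the constant and its field names below are stand-ins, not the real setup:

    use serde_json::Value;

    // Stand-in for the embedded trusted setup bytes.
    const SETUP_JSON: &[u8] = br#"{"g1_points": [], "g2_points": []}"#;

    fn parse_setup() -> Result<Value, String> {
        // &[u8] implements Read, so from_reader parses the bytes directly.
        serde_json::from_reader(SETUP_JSON)
            .map_err(|e| format!("Unable to read trusted setup file: {}", e))
    }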
@ -97,10 +103,25 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
                 .map_err(|e| format!("Unable to open {}: {}", filename, e))?;
             file.read_to_end(&mut bytes)
                 .map_err(|e| format!("Unable to read {}: {}", filename, e))?;
-            //FIXME(sean)
-            ExecutionPayloadHeaderMerge::<T>::from_ssz_bytes(bytes.as_slice())
-                .map(ExecutionPayloadHeader::Merge)
-                .map_err(|e| format!("SSZ decode failed: {:?}", e))
+            let fork_name = spec.fork_name_at_epoch(Epoch::new(0));
+            match fork_name {
+                ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(
+                    "genesis fork must be post-merge".to_string(),
+                )),
+                ForkName::Merge => {
+                    ExecutionPayloadHeaderMerge::<T>::from_ssz_bytes(bytes.as_slice())
+                        .map(ExecutionPayloadHeader::Merge)
+                }
+                ForkName::Capella => {
+                    ExecutionPayloadHeaderCapella::<T>::from_ssz_bytes(bytes.as_slice())
+                        .map(ExecutionPayloadHeader::Capella)
+                }
+                ForkName::Eip4844 => {
+                    ExecutionPayloadHeaderEip4844::<T>::from_ssz_bytes(bytes.as_slice())
+                        .map(ExecutionPayloadHeader::Eip4844)
+                }
+            }
+            .map_err(|e| format!("SSZ decode failed: {:?}", e))
         })
         .transpose()?;
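The decode side has to dispatch on `spec.fork_name_at_epoch(Epoch::new(0))` because each fork's header has a different SSZ layout (Capella adds `withdrawals_root`, for example) and the bytes themselves are untagged. A sketch of the failure mode the match guards against, under the same `types`/`ssz` assumptions as above:

    use ssz::{Decode, Encode};
    use types::{ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderMerge, MainnetEthSpec};

    fn cross_fork_decode_should_fail() {
        // Serialize a Capella header, then (wrongly) try to decode it as a
        // Merge header. The fixed-length portions differ, so this is expected
        // to return an error rather than a silently bogus header.
        let capella = ExecutionPayloadHeaderCapella::<MainnetEthSpec>::default();
        let bytes = capella.as_ssz_bytes();
        assert!(ExecutionPayloadHeaderMerge::<MainnetEthSpec>::from_ssz_bytes(&bytes).is_err());
    }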
@ -147,6 +168,7 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
         boot_enr: Some(vec![]),
         genesis_state_bytes,
         config: Config::from_chain_spec::<T>(&spec),
+        kzg_trusted_setup,
     };

     testnet.write_to_file(testnet_dir_path, overwrite_files)
@ -4,7 +4,7 @@ version = "3.3.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 autotests = false
-rust-version = "1.62"
+rust-version = "1.65"

 [features]
 default = ["slasher-mdbx"]
@ -39,8 +39,8 @@ fn double_vote_multi_vals() {
 fn double_vote_some_vals() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 2, 4, 6];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
     let slashings = hashset![att_slashing(&att1, &att2)];
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &slashings, 1);
@ -53,9 +53,9 @@ fn double_vote_some_vals_repeat() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 2, 4, 6];
     let v3 = vec![1, 3, 5];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
-    let att3 = indexed_att(&v3, 0, 1, 0);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
+    let att3 = indexed_att(v3, 0, 1, 0);
     let slashings = hashset![att_slashing(&att1, &att2)];
     let attestations = vec![att1, att2, att3];
     slasher_test_indiv(&attestations, &slashings, 1);
@ -67,8 +67,8 @@ fn double_vote_some_vals_repeat() {
 fn no_double_vote_same_target() {
     let v1 = vec![0, 1, 2, 3, 4, 5, 6];
     let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 0);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 0);
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
     slasher_test_indiv(&attestations, &hashset! {}, 1000);
@ -79,8 +79,8 @@ fn no_double_vote_same_target() {
 fn no_double_vote_distinct_vals() {
     let v1 = vec![0, 1, 2, 3];
     let v2 = vec![4, 5, 6, 7];
-    let att1 = indexed_att(&v1, 0, 1, 0);
-    let att2 = indexed_att(&v2, 0, 1, 1);
+    let att1 = indexed_att(v1, 0, 1, 0);
+    let att2 = indexed_att(v2, 0, 1, 1);
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
     slasher_test_indiv(&attestations, &hashset! {}, 1000);
@ -89,7 +89,7 @@ fn no_double_vote_distinct_vals() {
 #[test]
 fn no_double_vote_repeated() {
     let v = vec![0, 1, 2, 3, 4];
-    let att1 = indexed_att(&v, 0, 1, 0);
+    let att1 = indexed_att(v, 0, 1, 0);
     let att2 = att1.clone();
     let attestations = vec![att1, att2];
     slasher_test_indiv(&attestations, &hashset! {}, 1);
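The slasher test hunks above only drop the `&` on the first argument of `indexed_att`, which suggests the helper now takes the index list by value (or through a conversion trait), silencing clippy's needless-borrow style lints. A purely illustrative signature for which the by-value call is the natural one; the real helper lives in the slasher test utilities and may differ:

    // Hypothetical sketch: accepts anything viewable as a slice of indices.
    fn indexed_att_sketch(attesting_indices: impl AsRef<[u64]>) -> Vec<u64> {
        attesting_indices.as_ref().to_vec()
    }

Both `indexed_att_sketch(v1)` and `indexed_att_sketch(&v1)` compile against such a signature; the diff standardizes on the first form.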
@ -1,4 +1,4 @@
-TESTS_TAG := v1.3.0-alpha.2
+TESTS_TAG := v1.3.0-rc.0
 TESTS = general minimal mainnet
 TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))

@ -13,6 +13,8 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST))
 BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME)
 BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG)

+WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget)
+
 all:
 	make $(OUTPUT_DIR)
 	make $(BLS_OUTPUT_DIR)
@ -25,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS)

 $(BLS_OUTPUT_DIR):
 	mkdir $(BLS_OUTPUT_DIR)
-	wget $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL)
+	$(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL)
 	tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR)

 %-$(TESTS_TAG).tar.gz:
-	wget $(BASE_URL)/$*.tar.gz -O $@
+	$(WGET) $(BASE_URL)/$*.tar.gz -O $@

 clean-test-files:
 	rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR)
@ -41,8 +41,6 @@ excluded_paths = [
     "tests/.*/.*/ssz_static/LightClientFinalityUpdate",
     # Eip4844 tests are disabled for now.
     "tests/.*/eip4844",
-    # Capella tests are disabled for now.
-    "tests/.*/capella",
     # One of the EF researchers likes to pack the tarballs on a Mac
     ".*\.DS_Store.*",
     # More Mac weirdness.
@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file};
 use crate::type_name;
 use crate::type_name::TypeName;
 use serde_derive::Deserialize;
+use state_processing::per_epoch_processing::capella::process_historical_summaries_update;
 use state_processing::per_epoch_processing::{
     altair, base,
     effective_balance_updates::process_effective_balance_updates,
@ -57,6 +58,8 @@ pub struct RandaoMixesReset;
 #[derive(Debug)]
 pub struct HistoricalRootsUpdate;
 #[derive(Debug)]
+pub struct HistoricalSummariesUpdate;
+#[derive(Debug)]
 pub struct ParticipationRecordUpdates;
 #[derive(Debug)]
 pub struct SyncCommitteeUpdates;
@ -77,6 +80,7 @@ type_name!(EffectiveBalanceUpdates, "effective_balance_updates");
 type_name!(SlashingsReset, "slashings_reset");
 type_name!(RandaoMixesReset, "randao_mixes_reset");
 type_name!(HistoricalRootsUpdate, "historical_roots_update");
+type_name!(HistoricalSummariesUpdate, "historical_summaries_update");
 type_name!(ParticipationRecordUpdates, "participation_record_updates");
 type_name!(SyncCommitteeUpdates, "sync_committee_updates");
 type_name!(InactivityUpdates, "inactivity_updates");
@ -194,7 +198,23 @@ impl<E: EthSpec> EpochTransition<E> for RandaoMixesReset {

 impl<E: EthSpec> EpochTransition<E> for HistoricalRootsUpdate {
     fn run(state: &mut BeaconState<E>, _spec: &ChainSpec) -> Result<(), EpochProcessingError> {
-        process_historical_roots_update(state)
+        match state {
+            BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Merge(_) => {
+                process_historical_roots_update(state)
+            }
+            _ => Ok(()),
+        }
+    }
+}
+
+impl<E: EthSpec> EpochTransition<E> for HistoricalSummariesUpdate {
+    fn run(state: &mut BeaconState<E>, _spec: &ChainSpec) -> Result<(), EpochProcessingError> {
+        match state {
+            BeaconState::Capella(_) | BeaconState::Eip4844(_) => {
+                process_historical_summaries_update(state)
+            }
+            _ => Ok(()),
+        }
     }
 }

@ -287,10 +307,16 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> {
                 T::name() != "sync_committee_updates"
                     && T::name() != "inactivity_updates"
                     && T::name() != "participation_flag_updates"
+                    && T::name() != "historical_summaries_update"
             }
             // No phase0 tests for Altair and later.
-            ForkName::Altair | ForkName::Merge | ForkName::Capella | ForkName::Eip4844 => {
-                T::name() != "participation_record_updates"
+            ForkName::Altair | ForkName::Merge => {
+                T::name() != "participation_record_updates"
+                    && T::name() != "historical_summaries_update"
+            }
+            ForkName::Capella | ForkName::Eip4844 => {
+                T::name() != "participation_record_updates"
+                    && T::name() != "historical_roots_update"
             }
         }
     }
@ -311,6 +311,7 @@ impl<E: EthSpec> Tester<E> {
             .keypairs(vec![])
             .genesis_state_ephemeral_store(case.anchor_state.clone())
             .mock_execution_layer()
+            .recalculate_fork_times_with_genesis(0)
             .mock_execution_layer_all_payloads_valid()
             .build();

@ -4,30 +4,24 @@ use crate::case_result::compare_beacon_state_results_without_caches;
 use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
 use crate::testing_spec;
 use serde_derive::Deserialize;
-#[cfg(feature = "withdrawals-processing")]
-use state_processing::per_block_processing::process_operations::process_bls_to_execution_changes;
-#[cfg(feature = "withdrawals-processing")]
-use state_processing::per_block_processing::process_withdrawals;
 use state_processing::{
     per_block_processing::{
         errors::BlockProcessingError,
         process_block_header, process_execution_payload,
         process_operations::{
-            altair, base, process_attester_slashings, process_deposits, process_exits,
-            process_proposer_slashings,
+            altair, base, process_attester_slashings, process_bls_to_execution_changes,
+            process_deposits, process_exits, process_proposer_slashings,
         },
-        process_sync_aggregate, VerifyBlockRoot, VerifySignatures,
+        process_sync_aggregate, process_withdrawals, VerifyBlockRoot, VerifySignatures,
     },
     ConsensusContext,
 };
 use std::fmt::Debug;
 use std::path::Path;
-#[cfg(feature = "withdrawals-processing")]
-use types::SignedBlsToExecutionChange;
 use types::{
     Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit,
-    EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit,
-    SyncAggregate,
+    EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange,
+    SignedVoluntaryExit, SyncAggregate,
 };

 #[derive(Debug, Clone, Default, Deserialize)]
@ -42,7 +36,6 @@ struct ExecutionMetadata {
 }

 /// Newtype for testing withdrawals.
-#[cfg(feature = "withdrawals-processing")]
 #[derive(Debug, Clone, Deserialize)]
 pub struct WithdrawalsPayload<T: EthSpec> {
     payload: FullPayload<T>,
@ -341,7 +334,6 @@ impl<E: EthSpec> Operation<E> for BlindedPayload<E> {
     }
 }

-#[cfg(feature = "withdrawals-processing")]
 impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> {
     fn handler_name() -> String {
         "withdrawals".into()
@ -368,7 +360,6 @@ impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> {
         })
     }

-    #[cfg(feature = "withdrawals-processing")]
     fn apply_to(
         &self,
         state: &mut BeaconState<E>,
@ -384,7 +375,6 @@ impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> {
     }
 }

-#[cfg(feature = "withdrawals-processing")]
 impl<E: EthSpec> Operation<E> for SignedBlsToExecutionChange {
     fn handler_name() -> String {
         "bls_to_execution_change".into()
@ -377,6 +377,11 @@ impl<E: EthSpec + TypeName> Handler for SanitySlotsHandler<E> {
     fn handler_name(&self) -> String {
         "slots".into()
     }
+
+    fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool {
+        // Some sanity tests compute sync committees, which requires real crypto.
+        fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto"))
+    }
 }

 #[derive(Derivative)]
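The new `is_enabled_for_fork` gate mixes a runtime comparison with `cfg!`: the macro expands to a compile-time `true`/`false` that participates in an ordinary boolean expression, so (unlike `#[cfg(...)]`) the surrounding code is always compiled. A one-function sketch:

    // `cfg!` yields a plain bool; both outcomes still typecheck and compile.
    fn using_real_crypto() -> bool {
        cfg!(not(feature = "fake_crypto"))
    }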
@ -1,11 +1,10 @@
 pub use case_result::CaseResult;
-#[cfg(feature = "withdrawals-processing")]
 pub use cases::WithdrawalsPayload;
 pub use cases::{
-    Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, InactivityUpdates,
-    JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates,
-    RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset,
-    SyncCommitteeUpdates,
+    Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate,
+    InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates,
+    ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings,
+    SlashingsReset, SyncCommitteeUpdates,
 };
 pub use decode::log_file_access;
 pub use error::Error;
@ -1,4 +1,5 @@
 //! Mapping from types to canonical string identifiers used in testing.
+use types::historical_summary::HistoricalSummary;
 use types::*;

 pub trait TypeName {
@ -92,3 +93,4 @@ type_name_generic!(
     "SignedBeaconBlockAndBlobsSidecar"
 );
 type_name!(SignedBlsToExecutionChange, "SignedBLSToExecutionChange");
+type_name!(HistoricalSummary);
@ -82,14 +82,12 @@ fn operations_execution_payload_blinded() {
     OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run();
 }

-#[cfg(feature = "withdrawals-processing")]
 #[test]
 fn operations_withdrawals() {
     OperationsHandler::<MinimalEthSpec, WithdrawalsPayload<_>>::default().run();
     OperationsHandler::<MainnetEthSpec, WithdrawalsPayload<_>>::default().run();
 }

-#[cfg(feature = "withdrawals-processing")]
 #[test]
 fn operations_bls_to_execution_change() {
     OperationsHandler::<MinimalEthSpec, SignedBlsToExecutionChange>::default().run();
|
|||||||
#[cfg(feature = "fake_crypto")]
|
#[cfg(feature = "fake_crypto")]
|
||||||
mod ssz_static {
|
mod ssz_static {
|
||||||
use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler};
|
use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler};
|
||||||
|
use types::historical_summary::HistoricalSummary;
|
||||||
use types::signed_block_and_blobs::SignedBeaconBlockAndBlobsSidecarDecode;
|
use types::signed_block_and_blobs::SignedBeaconBlockAndBlobsSidecarDecode;
|
||||||
use types::*;
|
use types::*;
|
||||||
|
|
||||||
@ -384,6 +383,12 @@ mod ssz_static {
|
|||||||
SszStaticHandler::<SignedBeaconBlockAndBlobsSidecarDecode<MinimalEthSpec>, MinimalEthSpec>::eip4844_only().run();
|
SszStaticHandler::<SignedBeaconBlockAndBlobsSidecarDecode<MinimalEthSpec>, MinimalEthSpec>::eip4844_only().run();
|
||||||
SszStaticHandler::<SignedBeaconBlockAndBlobsSidecarDecode<MainnetEthSpec>, MainnetEthSpec>::eip4844_only().run();
|
SszStaticHandler::<SignedBeaconBlockAndBlobsSidecarDecode<MainnetEthSpec>, MainnetEthSpec>::eip4844_only().run();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn historical_summary() {
|
||||||
|
SszStaticHandler::<HistoricalSummary, MinimalEthSpec>::capella_only().run();
|
||||||
|
SszStaticHandler::<HistoricalSummary, MainnetEthSpec>::capella_only().run();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -450,6 +455,12 @@ fn epoch_processing_historical_roots_update() {
|
|||||||
EpochProcessingHandler::<MainnetEthSpec, HistoricalRootsUpdate>::default().run();
|
EpochProcessingHandler::<MainnetEthSpec, HistoricalRootsUpdate>::default().run();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn epoch_processing_historical_summaries_update() {
|
||||||
|
EpochProcessingHandler::<MinimalEthSpec, HistoricalSummariesUpdate>::default().run();
|
||||||
|
EpochProcessingHandler::<MainnetEthSpec, HistoricalSummariesUpdate>::default().run();
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn epoch_processing_participation_record_updates() {
|
fn epoch_processing_participation_record_updates() {
|
||||||
EpochProcessingHandler::<MinimalEthSpec, ParticipationRecordUpdates>::default().run();
|
EpochProcessingHandler::<MinimalEthSpec, ParticipationRecordUpdates>::default().run();
|
||||||
|
@ -76,7 +76,7 @@ impl GenericExecutionEngine for NethermindEngine {
     fn init_datadir() -> TempDir {
         let datadir = TempDir::new().unwrap();
         let genesis_json_path = datadir.path().join("genesis.json");
-        let mut file = File::create(&genesis_json_path).unwrap();
+        let mut file = File::create(genesis_json_path).unwrap();
         let json = nethermind_genesis_json();
         serde_json::to_writer(&mut file, &json).unwrap();
         datadir
@ -231,7 +231,7 @@ impl<E: EthSpec> LocalExecutionNode<E> {
             .tempdir()
             .expect("should create temp directory for client datadir");
         let jwt_file_path = datadir.path().join("jwt.hex");
-        if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) {
+        if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) {
             panic!("Failed to write jwt file {}", e);
         }
         Self {
@ -1,7 +1,10 @@
 //! This build script downloads the latest Web3Signer release and places it in the `OUT_DIR` so it
 //! can be used for integration testing.

-use reqwest::Client;
+use reqwest::{
+    header::{self, HeaderValue},
+    Client,
+};
 use serde_json::Value;
 use std::env;
 use std::fs;
@ -15,10 +18,15 @@ const FIXED_VERSION_STRING: Option<&str> = None;
 #[tokio::main]
 async fn main() {
     let out_dir = env::var("OUT_DIR").unwrap();
-    download_binary(out_dir.into()).await;
+
+    // Read a Github API token from the environment. This is intended to prevent rate-limits on CI.
+    // We use a name that is unlikely to accidentally collide with anything the user has configured.
+    let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN");
+
+    download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await;
 }

-pub async fn download_binary(dest_dir: PathBuf) {
+pub async fn download_binary(dest_dir: PathBuf, github_token: &str) {
     let version_file = dest_dir.join("version");

     let client = Client::builder()
|
|||||||
env_version
|
env_version
|
||||||
} else {
|
} else {
|
||||||
// Get the latest release of the web3 signer repo.
|
// Get the latest release of the web3 signer repo.
|
||||||
|
let mut token_header_value = HeaderValue::from_str(github_token).unwrap();
|
||||||
|
token_header_value.set_sensitive(true);
|
||||||
let latest_response: Value = client
|
let latest_response: Value = client
|
||||||
.get("https://api.github.com/repos/ConsenSys/web3signer/releases/latest")
|
.get("https://api.github.com/repos/ConsenSys/web3signer/releases/latest")
|
||||||
|
.header(header::AUTHORIZATION, token_header_value)
|
||||||
.send()
|
.send()
|
||||||
.await
|
.await
|
||||||
.unwrap()
|
.unwrap()
|
||||||
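`set_sensitive(true)` is what keeps the token out of logs: the `http` crate redacts sensitive header values from `Debug` output. A small sketch of the same construction, with simplified error handling:

    use reqwest::header::HeaderValue;

    fn auth_header(token: &str) -> HeaderValue {
        // from_str rejects non-visible-ASCII bytes, hence the expect.
        let mut value = HeaderValue::from_str(token).expect("token is not valid header ASCII");
        // Redact the value wherever the request or its headers are Debug-printed.
        value.set_sensitive(true);
        value
    }

One caveat worth noting: with `LIGHTHOUSE_GITHUB_TOKEN` unset, the hunk above sends an empty `Authorization` header; whether the GitHub API treats that exactly like an anonymous request is an assumption of this patch, not something verified here.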
@ -441,7 +441,7 @@ impl DoppelgangerService {
         }

         // Get a list of indices to provide to the BN API.
-        let indices_only = indices_map.iter().map(|(index, _)| *index).collect();
+        let indices_only = indices_map.keys().copied().collect();

         // Pull the liveness responses from the BN.
         let request_epoch = request_slot.epoch(E::slots_per_epoch());
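`indices_map.keys().copied()` is the idiomatic replacement for `iter().map(|(index, _)| *index)` when the key type is `Copy`: it iterates `&u64` and copies each key out, never touching the values. A sketch with a stand-in value type (the real map's value type is whatever the service stores against each index):

    use std::collections::HashMap;

    fn indices_only(indices_map: &HashMap<u64, String>) -> Vec<u64> {
        // keys() yields &u64; copied() converts to u64 for collect().
        indices_map.keys().copied().collect()
    }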
@ -971,16 +971,16 @@ mod test {
|
|||||||
LivenessResponses {
|
LivenessResponses {
|
||||||
current_epoch_responses: detection_indices
|
current_epoch_responses: detection_indices
|
||||||
.iter()
|
.iter()
|
||||||
.map(|i| LivenessResponseData {
|
.map(|&index| LivenessResponseData {
|
||||||
index: *i as u64,
|
index,
|
||||||
epoch: current_epoch,
|
epoch: current_epoch,
|
||||||
is_live: false,
|
is_live: false,
|
||||||
})
|
})
|
||||||
.collect(),
|
.collect(),
|
||||||
previous_epoch_responses: detection_indices
|
previous_epoch_responses: detection_indices
|
||||||
.iter()
|
.iter()
|
||||||
.map(|i| LivenessResponseData {
|
.map(|&index| LivenessResponseData {
|
||||||
index: *i as u64,
|
index,
|
||||||
epoch: current_epoch - 1,
|
epoch: current_epoch - 1,
|
||||||
is_live: false,
|
is_live: false,
|
||||||
})
|
})
|
||||||
|
@ -331,7 +331,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
         .and(signer.clone())
         .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| {
             blocking_signed_json_task(signer, move || {
-                let app_uptime = app_start.elapsed().as_secs() as u64;
+                let app_uptime = app_start.elapsed().as_secs();
                 Ok(api_types::GenericResponse::from(observe_system_health_vc(
                     sysinfo, val_dir, app_uptime,
                 )))
@ -472,7 +472,7 @@ impl InitializedValidators {

     /// Iterate through all voting public keys in `self` that should be used when querying for duties.
     pub fn iter_voting_pubkeys(&self) -> impl Iterator<Item = &PublicKeyBytes> {
-        self.validators.iter().map(|(pubkey, _)| pubkey)
+        self.validators.keys()
     }

     /// Returns the voting `Keypair` for a given voting `PublicKey`, if all are true:
@ -104,7 +104,7 @@ impl KeyCache {
         let file = File::options()
             .read(true)
             .create_new(false)
-            .open(&cache_path)
+            .open(cache_path)
             .map_err(Error::UnableToOpenFile)?;
         serde_json::from_reader(file).map_err(Error::UnableToParseFile)
     }
@ -37,7 +37,6 @@ pub enum Error {
 pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
     RandaoReveal(Epoch),
     BeaconBlock(&'a BeaconBlock<T, Payload>),
-    BlobsSidecar(&'a BlobsSidecar<T>),
     AttestationData(&'a AttestationData),
     SignedAggregateAndProof(&'a AggregateAndProof<T>),
     SelectionProof(Slot),
@ -59,7 +58,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay
         match self {
             SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain),
             SignableMessage::BeaconBlock(b) => b.signing_root(domain),
-            SignableMessage::BlobsSidecar(b) => b.signing_root(domain),
             SignableMessage::AttestationData(a) => a.signing_root(domain),
             SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain),
             SignableMessage::SelectionProof(slot) => slot.signing_root(domain),
@ -182,7 +180,6 @@ impl SigningMethod {
                 Web3SignerObject::RandaoReveal { epoch }
             }
             SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?,
-            SignableMessage::BlobsSidecar(blob) => Web3SignerObject::BlobsSidecar(blob),
             SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a),
             SignableMessage::SignedAggregateAndProof(a) => {
                 Web3SignerObject::AggregateAndProof(a)
@ -11,7 +11,6 @@ pub enum MessageType {
     AggregateAndProof,
     Attestation,
     BlockV2,
-    BlobsSidecar,
     Deposit,
     RandaoReveal,
     VoluntaryExit,
@ -52,8 +51,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload<T>> {
         #[serde(skip_serializing_if = "Option::is_none")]
         block_header: Option<BeaconBlockHeader>,
     },
-    //FIXME(sean) just guessing here
-    BlobsSidecar(&'a BlobsSidecar<T>),
     #[allow(dead_code)]
     Deposit {
         pubkey: PublicKeyBytes,
@ -114,7 +111,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> Web3SignerObject<'a, T, Pa
             Web3SignerObject::AggregateAndProof(_) => MessageType::AggregateAndProof,
             Web3SignerObject::Attestation(_) => MessageType::Attestation,
             Web3SignerObject::BeaconBlock { .. } => MessageType::BlockV2,
-            Web3SignerObject::BlobsSidecar(_) => MessageType::BlobsSidecar,
             Web3SignerObject::Deposit { .. } => MessageType::Deposit,
             Web3SignerObject::RandaoReveal { .. } => MessageType::RandaoReveal,
             Web3SignerObject::VoluntaryExit(_) => MessageType::VoluntaryExit,