Merge pull request #4349 from jimmygchen/deneb-merge-from-unstable-20230530
Deneb merge from unstable 2023/05/30
commit e8f1d533fb
.github/workflows/docker.yml (vendored, 6 lines changed)

@@ -5,7 +5,6 @@ on:
     branches:
       - unstable
       - stable
       - capella
-      - eip4844
     tags:
       - v*
@@ -36,11 +35,6 @@ jobs:
         run: |
          echo "VERSION=latest" >> $GITHUB_ENV
          echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
-      - name: Extract version (if capella)
-        if: github.event.ref == 'refs/heads/capella'
-        run: |
-          echo "VERSION=capella" >> $GITHUB_ENV
-          echo "VERSION_SUFFIX=" >> $GITHUB_ENV
       - name: Extract version (if eip4844)
         if: github.event.ref == 'refs/heads/eip4844'
         run: |
.github/workflows/linkcheck.yml (vendored, 1 line changed)

@@ -7,6 +7,7 @@ on:
   pull_request:
     paths:
       - 'book/**'
+  merge_group:

 jobs:
   linkcheck:
.github/workflows/local-testnet.yml (vendored, 19 lines changed)

@@ -6,6 +6,7 @@ on:
     branches:
       - unstable
   pull_request:
+  merge_group:

 jobs:
   run-local-testnet:
@@ -24,25 +25,23 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: npm install ganache@latest --global
-      - name: Install geth
+      - name: Install geth (ubuntu)
+        if: matrix.os == 'ubuntu-22.04'
         run: |
           sudo add-apt-repository -y ppa:ethereum/ethereum
           sudo apt-get update
           sudo apt-get install ethereum
-        if: matrix.os == 'ubuntu-22.04'
       - name: Install geth (mac)
+        if: matrix.os == 'macos-12'
         run: |
           brew tap ethereum/ethereum
           brew install ethereum
-        if: matrix.os == 'macos-12'
       - name: Install GNU sed & GNU grep
+        if: matrix.os == 'macos-12'
         run: |
           brew install gnu-sed grep
           echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
           echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH
-        if: matrix.os == 'macos-12'

       # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
       - uses: actions/cache@v3
         id: cache-cargo
@@ -59,7 +58,7 @@ jobs:
         run: make && make install-lcli

       - name: Start local testnet
-        run: ./start_local_testnet.sh && sleep 60
+        run: ./start_local_testnet.sh genesis.json && sleep 60
         working-directory: scripts/local_testnet

       - name: Print logs
@@ -75,7 +74,7 @@ jobs:
         working-directory: scripts/local_testnet

       - name: Start local testnet with blinded block production
-        run: ./start_local_testnet.sh -p && sleep 60
+        run: ./start_local_testnet.sh -p genesis.json && sleep 60
         working-directory: scripts/local_testnet

       - name: Print logs for blinded block testnet
@@ -84,4 +83,4 @@ jobs:

       - name: Stop local testnet with blinded block production
         run: ./stop_local_testnet.sh
-        working-directory: scripts/local_testnet
+        working-directory: scripts/local_testnet
.github/workflows/test-suite.yml (vendored, 37 lines changed)

@@ -8,6 +8,7 @@ on:
       - trying
       - 'pr/*'
   pull_request:
+  merge_group:
 env:
   # Deny warnings in CI
   # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
@@ -21,7 +22,7 @@ jobs:
   target-branch-check:
     name: target-branch-check
     runs-on: ubuntu-latest
-    if: github.event_name == 'pull_request'
+    if: github.event_name == 'pull_request' || github.event_name == 'merge_group'
     steps:
       - name: Check that the pull request is not targeting the stable branch
         run: test ${{ github.base_ref }} != "stable"
@@ -58,8 +59,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run tests in release
         run: make test-release
   release-tests-windows:
@@ -78,8 +79,8 @@ jobs:
         run: |
           choco install python protoc visualstudio2019-workload-vctools -y
           npm config set msvs_version 2019
-      - name: Install ganache
-        run: npm install -g ganache --loglevel verbose
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Install make
         run: choco install -y make
       - uses: KyleMayes/install-llvm-action@v1
@@ -140,8 +141,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run tests in debug
         run: make test-debug
   state-transition-vectors-ubuntu:
@@ -196,8 +197,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the beacon chain sim that starts from an eth1 contract
         run: cargo run --release --bin simulator eth1-sim
   merge-transition-ubuntu:
@@ -212,8 +213,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the beacon chain sim and go through the merge transition
         run: cargo run --release --bin simulator eth1-sim --post-merge
   no-eth1-simulator-ubuntu:
@@ -228,8 +229,6 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
       - name: Run the beacon chain sim without an eth1 connection
         run: cargo run --release --bin simulator no-eth1-sim
   syncing-simulator-ubuntu:
@@ -244,8 +243,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the syncing simulator
         run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
@@ -269,14 +268,14 @@ jobs:
         run: |
           make
           make install-lcli
-      - name: Run the doppelganger protection success test script
-        run: |
-          cd scripts/tests
-          ./doppelganger_protection.sh success genesis.json
       - name: Run the doppelganger protection failure test script
         run: |
           cd scripts/tests
           ./doppelganger_protection.sh failure genesis.json
+      - name: Run the doppelganger protection success test script
+        run: |
+          cd scripts/tests
+          ./doppelganger_protection.sh success genesis.json
   execution-engine-integration-ubuntu:
     name: execution-engine-integration-ubuntu
     runs-on: ubuntu-latest
@@ -45,8 +45,8 @@ questions.
 2. **Work in a feature branch** of your personal fork
    (github.com/YOUR_NAME/lighthouse) of the main repository
    (github.com/sigp/lighthouse).
-3. Once you feel you have addressed the issue, **create a pull-request** to merge
-   your changes into the main repository.
+3. Once you feel you have addressed the issue, **create a pull-request** with
+   `unstable` as the base branch to merge your changes into the main repository.
 4. Wait for the repository maintainers to **review your changes** to ensure the
    issue is addressed satisfactorily. Optionally, mention your PR on
    [discord](https://discord.gg/cyAszAh).
Cargo.lock (generated, 1260 lines changed): file diff suppressed because it is too large.
Cargo.toml (18 lines changed)

@@ -53,22 +53,14 @@ members = [
     "consensus/fork_choice",
     "consensus/proto_array",
     "consensus/safe_arith",
-    "consensus/ssz",
-    "consensus/ssz_derive",
-    "consensus/ssz_types",
     "consensus/serde_utils",
     "consensus/state_processing",
     "consensus/swap_or_not_shuffle",
-    "consensus/tree_hash",
-    "consensus/tree_hash_derive",

     "crypto/bls",
+    "crypto/kzg",
-    "crypto/eth2_hashing",
     "crypto/eth2_key_derivation",
     "crypto/eth2_keystore",
     "crypto/eth2_wallet",
-    "crypto/kzg",

     "lcli",
@@ -97,14 +89,10 @@ resolver = "2"
 [patch]
 [patch.crates-io]
 warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
-eth2_ssz = { path = "consensus/ssz" }
-eth2_ssz_derive = { path = "consensus/ssz_derive" }
-eth2_ssz_types = { path = "consensus/ssz_types" }
-eth2_hashing = { path = "crypto/eth2_hashing" }
-tree_hash = { path = "consensus/tree_hash" }
-tree_hash_derive = { path = "consensus/tree_hash_derive" }
-eth2_serde_utils = { path = "consensus/serde_utils" }
 arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }
+tree_hash = { git = "https://github.com/sigp/tree_hash", rev="a2471f3b240f407a0ec7436cff11f03e5ec8c706" }
+ssz_types = { git = "https://github.com/sigp/ssz_types", rev="63a80d04286c8561d5c211230a21bf1299d66059" }
+ethereum_ssz_derive = { git = "https://github.com/jimmygchen/ethereum_ssz", rev="231aa8c840262da694e024235dbc638a2980c545"}

 [patch."https://github.com/ralexstokes/mev-rs"]
 mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
@@ -2,7 +2,9 @@ FROM rust:1.68.2-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG FEATURES
+ARG PROFILE=release
 ENV FEATURES $FEATURES
+ENV PROFILE $PROFILE
 RUN cd lighthouse && make

 FROM ubuntu:22.04
@@ -27,7 +27,6 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password";
 pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/";
 pub const CONFIRMATION_PHRASE: &str = "Exit my validator";
 pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html";
-pub const PROMPT: &str = "WARNING: WITHDRAWING STAKED ETH IS NOT CURRENTLY POSSIBLE";

 pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new("exit")
@@ -161,7 +160,6 @@ async fn publish_voluntary_exit<E: EthSpec>(
     );
     if !no_confirmation {
         eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n");
-        eprintln!("{}\n", PROMPT);
         eprintln!(
             "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.",
             WEBSITE_URL
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "4.1.0"
+version = "4.2.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
 edition = "2021"
@@ -33,12 +33,12 @@ serde_derive = "1.0.116"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 sloggers = { version = "2.1.1", features = ["json"] }
 slot_clock = { path = "../../common/slot_clock" }
-eth2_hashing = "0.3.0"
-eth2_ssz = "0.4.1"
-eth2_ssz_types = "0.2.2"
-eth2_ssz_derive = "0.3.1"
+ethereum_hashing = "1.0.0-beta.2"
+ethereum_ssz = "0.5.0"
+ssz_types = "0.5.0"
+ethereum_ssz_derive = "0.5.0"
 state_processing = { path = "../../consensus/state_processing" }
-tree_hash = "0.4.1"
+tree_hash = "0.5.0"
 types = { path = "../../consensus/types" }
 tokio = "1.14.0"
 tokio-stream = "0.1.3"
@@ -65,14 +65,15 @@ where
         .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
         .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;

-    let fork = chain.canonical_head.cached_head().head_fork();
-
     let mut signature_sets = Vec::with_capacity(num_indexed * 3);

     // Iterate, flattening to get only the `Ok` values.
     for indexed in indexing_results.iter().flatten() {
         let signed_aggregate = &indexed.signed_aggregate;
         let indexed_attestation = &indexed.indexed_attestation;
+        let fork = chain
+            .spec
+            .fork_at_epoch(indexed_attestation.data.target.epoch);

         signature_sets.push(
             signed_aggregate_selection_proof_signature_set(
@@ -169,8 +170,6 @@ where
         &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES,
     );

-    let fork = chain.canonical_head.cached_head().head_fork();
-
     let pubkey_cache = chain
         .validator_pubkey_cache
         .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
@@ -181,6 +180,9 @@ where
     // Iterate, flattening to get only the `Ok` values.
     for partially_verified in partial_results.iter().flatten() {
         let indexed_attestation = &partially_verified.indexed_attestation;
+        let fork = chain
+            .spec
+            .fork_at_epoch(indexed_attestation.data.target.epoch);

         let signature_set = indexed_attestation_signature_set_from_pubkeys(
             |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
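The two hunks above replace a single head-fork lookup with a per-attestation lookup keyed on the attestation's target epoch, so a message that crosses a fork boundary is verified against the fork version active at its own epoch rather than at the head. A minimal, self-contained sketch of that selection rule (the `ChainSpec`/`Fork` shapes here are simplified stand-ins, not Lighthouse's actual types):

#[derive(Clone, Copy, PartialEq, Debug)]
struct Fork {
    current_version: [u8; 4],
}

struct ChainSpec {
    // (fork activation epoch, fork version), sorted by activation epoch.
    forks: Vec<(u64, [u8; 4])>,
}

impl ChainSpec {
    /// Return the fork active at `epoch`: the last fork whose
    /// activation epoch is <= `epoch`.
    fn fork_at_epoch(&self, epoch: u64) -> Fork {
        let current_version = self
            .forks
            .iter()
            .take_while(|(activation, _)| *activation <= epoch)
            .last()
            .map(|(_, version)| *version)
            .unwrap_or([0, 0, 0, 0]);
        Fork { current_version }
    }
}

fn main() {
    // Two forks: genesis at epoch 0, an upgrade at epoch 100.
    let spec = ChainSpec {
        forks: vec![(0, [0, 0, 0, 0]), (100, [1, 0, 0, 0])],
    };
    // An attestation targeting epoch 99 must verify under the old fork
    // version, even if the head has already crossed into epoch 100.
    assert_eq!(spec.fork_at_epoch(99).current_version, [0, 0, 0, 0]);
    assert_eq!(spec.fork_at_epoch(100).current_version, [1, 0, 0, 0]);
}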
@@ -97,7 +97,8 @@ use state_processing::{
     },
     per_slot_processing,
     state_advance::{complete_state_advance, partial_state_advance},
-    BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation,
+    BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, StateProcessingStrategy,
+    VerifyBlockRoot, VerifyOperation,
 };
 use std::borrow::Cow;
 use std::cmp::Ordering;
@@ -464,6 +465,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub slasher: Option<Arc<Slasher<T::EthSpec>>>,
     /// Provides monitoring of a set of explicitly defined validators.
     pub validator_monitor: RwLock<ValidatorMonitor<T::EthSpec>>,
+    /// The slot at which blocks are downloaded back to.
+    pub genesis_backfill_slot: Slot,
     pub proposal_blob_cache: BlobCache<T::EthSpec>,
     pub data_availability_checker: DataAvailabilityChecker<T>,
     pub kzg: Option<Arc<Kzg>>,
@@ -599,7 +602,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

     /// Persists `self.eth1_chain` and its caches to disk.
     pub fn persist_eth1_cache(&self) -> Result<(), Error> {
-        let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);
+        let _timer = metrics::start_timer(&metrics::PERSIST_ETH1_CACHE);

         if let Some(eth1_chain) = self.eth1_chain.as_ref() {
             self.store
@@ -4891,6 +4894,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             &mut state,
             &block,
             signature_strategy,
+            StateProcessingStrategy::Accurate,
             VerifyBlockRoot::True,
             &mut ctxt,
             &self.spec,
@@ -5805,6 +5809,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let shuffling_id = BlockShufflingIds {
             current: head_block.current_epoch_shuffling_id.clone(),
             next: head_block.next_epoch_shuffling_id.clone(),
+            previous: None,
             block_root: head_block.root,
         }
         .id_for_epoch(shuffling_epoch)
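`per_block_processing` now takes an explicit `StateProcessingStrategy` alongside the signature strategy. A minimal sketch of this flag-gating pattern; only `Accurate` appears in this diff, so the second variant name and its exact meaning are assumptions for illustration:

/// Sketch of the pattern, not Lighthouse's exact definition: the caller
/// picks a strategy, and block processing consults it before doing work
/// that only a fully accurate post-state requires.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum StateProcessingStrategy {
    /// Apply every step of the state transition.
    Accurate,
    /// Skip work (e.g. per-slot state-root checks) that a replay-only
    /// caller does not need. Variant name assumed, not taken from the diff.
    Inconsistent,
}

pub struct BlockOutcome {
    pub state_root_verified: bool,
}

pub fn per_block_processing_sketch(strategy: StateProcessingStrategy) -> BlockOutcome {
    // ... apply operations, process attestations, etc. ...
    let state_root_verified = match strategy {
        // Recompute and check the state root.
        StateProcessingStrategy::Accurate => true,
        StateProcessingStrategy::Inconsistent => false,
    };
    BlockOutcome { state_root_verified }
}

fn main() {
    let outcome = per_block_processing_sketch(StateProcessingStrategy::Accurate);
    assert!(outcome.state_root_verified);
}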
@@ -218,7 +218,6 @@ where
             finalized_checkpoint: self.finalized_checkpoint,
             justified_checkpoint: self.justified_checkpoint,
             justified_balances: self.justified_balances.effective_balances.clone(),
-            best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT,
             unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
             unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
             proposer_boost_root: self.proposer_boost_root,
@@ -355,24 +354,62 @@ where
     }
 }

+pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV17;
+
 /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
-#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
+#[superstruct(
+    variants(V11, V17),
+    variant_attributes(derive(Encode, Decode)),
+    no_enum
+)]
 pub struct PersistedForkChoiceStore {
-    #[superstruct(only(V11))]
+    #[superstruct(only(V11, V17))]
     pub balances_cache: BalancesCacheV8,
     pub time: Slot,
     pub finalized_checkpoint: Checkpoint,
     pub justified_checkpoint: Checkpoint,
     pub justified_balances: Vec<u64>,
     #[superstruct(only(V11))]
     pub best_justified_checkpoint: Checkpoint,
-    #[superstruct(only(V11))]
+    #[superstruct(only(V11, V17))]
     pub unrealized_justified_checkpoint: Checkpoint,
-    #[superstruct(only(V11))]
+    #[superstruct(only(V11, V17))]
     pub unrealized_finalized_checkpoint: Checkpoint,
-    #[superstruct(only(V11))]
+    #[superstruct(only(V11, V17))]
     pub proposer_boost_root: Hash256,
-    #[superstruct(only(V11))]
+    #[superstruct(only(V11, V17))]
     pub equivocating_indices: BTreeSet<u64>,
 }

-pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11;
+impl Into<PersistedForkChoiceStore> for PersistedForkChoiceStoreV11 {
+    fn into(self) -> PersistedForkChoiceStore {
+        PersistedForkChoiceStore {
+            balances_cache: self.balances_cache,
+            time: self.time,
+            finalized_checkpoint: self.finalized_checkpoint,
+            justified_checkpoint: self.justified_checkpoint,
+            justified_balances: self.justified_balances,
+            unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
+            unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
+            proposer_boost_root: self.proposer_boost_root,
+            equivocating_indices: self.equivocating_indices,
+        }
+    }
+}
+
+impl Into<PersistedForkChoiceStoreV11> for PersistedForkChoiceStore {
+    fn into(self) -> PersistedForkChoiceStoreV11 {
+        PersistedForkChoiceStoreV11 {
+            balances_cache: self.balances_cache,
+            time: self.time,
+            finalized_checkpoint: self.finalized_checkpoint,
+            justified_checkpoint: self.justified_checkpoint,
+            justified_balances: self.justified_balances,
+            best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT,
+            unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
+            unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
+            proposer_boost_root: self.proposer_boost_root,
+            equivocating_indices: self.equivocating_indices,
+        }
+    }
+}
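The store above leans on the `superstruct` macro: one annotated definition expands into a concrete struct per variant, and `#[superstruct(only(...))]` controls which variants carry a field. A hand-rolled sketch of what that expansion plus the up/downgrade conversions amount to (toy field types, not the real store):

// What `#[superstruct(variants(V11, V17), no_enum)]` conceptually expands
// to: one concrete struct per variant, where a field marked
// `#[superstruct(only(V11))]` exists only in the V11 struct.
pub struct PersistedStoreV11 {
    pub time: u64,
    /// Present only in V11; dropped in V17.
    pub best_justified_checkpoint: u64,
}

pub struct PersistedStoreV17 {
    pub time: u64,
}

/// Junk value written back on downgrade, mirroring the
/// `JUNK_BEST_JUSTIFIED_CHECKPOINT` idea in the diff.
const JUNK_BEST_JUSTIFIED: u64 = 0;

impl From<PersistedStoreV11> for PersistedStoreV17 {
    fn from(v11: PersistedStoreV11) -> Self {
        // Upgrade: the unused field is simply discarded.
        Self { time: v11.time }
    }
}

impl From<PersistedStoreV17> for PersistedStoreV11 {
    fn from(v17: PersistedStoreV17) -> Self {
        // Downgrade: re-synthesise the dropped field with a junk value.
        Self {
            time: v17.time,
            best_justified_checkpoint: JUNK_BEST_JUSTIFIED,
        }
    }
}

fn main() {
    let old = PersistedStoreV11 { time: 42, best_justified_checkpoint: 7 };
    let new: PersistedStoreV17 = old.into();
    let back: PersistedStoreV11 = new.into();
    assert_eq!(back.time, 42);
    assert_eq!(back.best_justified_checkpoint, JUNK_BEST_JUSTIFIED);
}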
@@ -84,7 +84,7 @@ use state_processing::{
     per_block_processing, per_slot_processing,
     state_advance::partial_state_advance,
     BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError,
-    VerifyBlockRoot,
+    StateProcessingStrategy, VerifyBlockRoot,
 };
 use std::borrow::Cow;
 use std::fs;
@@ -1615,6 +1615,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             block.as_block(),
             // Signatures were verified earlier in this function.
             BlockSignatureStrategy::NoVerification,
+            StateProcessingStrategy::Accurate,
             VerifyBlockRoot::True,
             &mut consensus_context,
             &chain.spec,
@@ -8,7 +8,7 @@ use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_bound
 use crate::head_tracker::HeadTracker;
 use crate::migrate::{BackgroundMigrator, MigratorConfig};
 use crate::persisted_beacon_chain::PersistedBeaconChain;
-use crate::shuffling_cache::ShufflingCache;
+use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
 use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
 use crate::timeout_rw_lock::TimeoutRwLock;
 use crate::validator_monitor::ValidatorMonitor;
@@ -710,6 +710,8 @@ where
         )?;
     }

+    let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?;
+
     let mut head_snapshot = BeaconSnapshot {
         beacon_block_root: head_block_root,
         beacon_block: Arc::new(head_block),
@@ -791,6 +793,29 @@ where
         let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot));
         let shuffling_cache_size = self.chain_config.shuffling_cache_size;

+        // Calculate the weak subjectivity point in which to backfill blocks to.
+        let genesis_backfill_slot = if self.chain_config.genesis_backfill {
+            Slot::new(0)
+        } else {
+            let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay
+                + self.spec.churn_limit_quotient)
+                .as_u64()
+                / 2;
+            match slot_clock.now() {
+                Some(current_slot) => {
+                    let genesis_backfill_epoch = current_slot
+                        .epoch(TEthSpec::slots_per_epoch())
+                        .saturating_sub(backfill_epoch_range);
+                    genesis_backfill_epoch.start_slot(TEthSpec::slots_per_epoch())
+                }
+                None => {
+                    // The slot clock cannot derive the current slot. We therefore assume we are
+                    // at or prior to genesis and backfill should sync all the way to genesis.
+                    Slot::new(0)
+                }
+            }
+        };
+
         let beacon_chain = BeaconChain {
             spec: self.spec.clone(),
             config: self.chain_config,
@@ -845,7 +870,11 @@ where
                 DEFAULT_SNAPSHOT_CACHE_SIZE,
                 head_for_snapshot_cache,
             )),
-            shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(shuffling_cache_size)),
+            shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(
+                shuffling_cache_size,
+                head_shuffling_ids,
+                log.clone(),
+            )),
             eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())),
             beacon_proposer_cache: <_>::default(),
             block_times_cache: <_>::default(),
@@ -860,6 +889,7 @@ where
             graffiti: self.graffiti,
             slasher: self.slasher.clone(),
             validator_monitor: RwLock::new(validator_monitor),
+            genesis_backfill_slot,
             //TODO(sean) should we move kzg solely to the da checker?
             data_availability_checker: DataAvailabilityChecker::new(
                 slot_clock,
@@ -1036,7 +1066,7 @@ mod test {
     use super::*;
     use crate::test_utils::EphemeralHarnessType;
     use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
-    use eth2_hashing::hash;
+    use ethereum_hashing::hash;
     use genesis::{
         generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH,
     };
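The `genesis_backfill_slot` computation above keeps roughly `(min_validator_withdrawability_delay + churn_limit_quotient) / 2` epochs of recent history. With the mainnet preset values from the consensus spec (256 and 65,536), that is 32,896 epochs. A standalone sketch of the arithmetic:

const SLOTS_PER_EPOCH: u64 = 32;
// Mainnet preset values from the consensus spec.
const MIN_VALIDATOR_WITHDRAWABILITY_DELAY: u64 = 256;
const CHURN_LIMIT_QUOTIENT: u64 = 65_536;

/// Mirrors the builder logic: keep `backfill_epoch_range` epochs of
/// recent history, unless `genesis_backfill` forces a full sync to slot 0.
fn genesis_backfill_slot(genesis_backfill: bool, current_slot: Option<u64>) -> u64 {
    if genesis_backfill {
        return 0;
    }
    let backfill_epoch_range =
        (MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT) / 2; // 32,896 epochs
    match current_slot {
        Some(slot) => {
            let current_epoch = slot / SLOTS_PER_EPOCH;
            current_epoch.saturating_sub(backfill_epoch_range) * SLOTS_PER_EPOCH
        }
        // No readable clock: assume pre-genesis and backfill everything.
        None => 0,
    }
}

fn main() {
    // At epoch 200,000 (slot 6,400,000), backfill stops at epoch
    // 200,000 - 32,896 = 167,104, i.e. slot 5,347,328.
    assert_eq!(genesis_backfill_slot(false, Some(6_400_000)), 5_347_328);
    // Early in the chain's life the subtraction saturates to genesis.
    assert_eq!(genesis_backfill_slot(false, Some(1_000)), 0);
}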
@@ -31,7 +31,9 @@
 //! the head block root. This is unacceptable for fast-responding functions like the networking
 //! stack.

+use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT;
 use crate::persisted_fork_choice::PersistedForkChoice;
+use crate::shuffling_cache::BlockShufflingIds;
 use crate::{
     beacon_chain::{
         BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate,
@@ -850,6 +852,35 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 );
             });

+        match BlockShufflingIds::try_from_head(
+            new_snapshot.beacon_block_root,
+            &new_snapshot.beacon_state,
+        ) {
+            Ok(head_shuffling_ids) => {
+                self.shuffling_cache
+                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                    .map(|mut shuffling_cache| {
+                        shuffling_cache.update_head_shuffling_ids(head_shuffling_ids)
+                    })
+                    .unwrap_or_else(|| {
+                        error!(
+                            self.log,
+                            "Failed to obtain cache write lock";
+                            "lock" => "shuffling_cache",
+                            "task" => "update head shuffling decision root"
+                        );
+                    });
+            }
+            Err(e) => {
+                error!(
+                    self.log,
+                    "Failed to get head shuffling ids";
+                    "error" => ?e,
+                    "head_block_root" => ?new_snapshot.beacon_block_root
+                );
+            }
+        }
+
         observe_head_block_delays(
             &mut self.block_times_cache.write(),
             &new_head_proto_block,
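The head-update hook above degrades gracefully when the shuffling cache is contended: it tries to take the write lock with a timeout and logs instead of blocking. A minimal sketch of that pattern using the `parking_lot` crate directly (Lighthouse wraps this in its own `TimeoutRwLock`, elided here):

use parking_lot::RwLock;
use std::time::Duration;

fn main() {
    let cache = RwLock::new(Vec::<u64>::new());
    let timeout = Duration::from_secs(1);

    // Prefer degrading gracefully over blocking a hot path: if the lock
    // cannot be taken within the timeout, log and move on.
    match cache.try_write_for(timeout) {
        Some(mut guard) => guard.push(42),
        None => eprintln!("failed to obtain cache write lock within {:?}", timeout),
    }
}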
@@ -73,6 +73,9 @@ pub struct ChainConfig {
     pub optimistic_finalized_sync: bool,
     /// The size of the shuffling cache,
     pub shuffling_cache_size: usize,
+    /// If using a weak-subjectivity sync, whether we should download blocks all the way back to
+    /// genesis.
+    pub genesis_backfill: bool,
     /// Whether to send payload attributes every slot, regardless of connected proposers.
     ///
     /// This is useful for block builders and testing.
@@ -106,6 +109,7 @@ impl Default for ChainConfig {
             // This value isn't actually read except in tests.
             optimistic_finalized_sync: true,
             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
+            genesis_backfill: false,
             always_prepare_payload: false,
             enable_backfill_rate_limiting: true,
         }
@@ -1,7 +1,7 @@
 use crate::metrics;
 use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
 use eth2::lighthouse::Eth1SyncStatusData;
-use eth2_hashing::hash;
+use ethereum_hashing::hash;
 use int_to_bytes::int_to_bytes32;
 use slog::{debug, error, trace, Logger};
 use ssz::{Decode, Encode};
@@ -5,7 +5,7 @@ use slog::{info, warn, Logger};
 use state_processing::state_advance::complete_state_advance;
 use state_processing::{
     per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext,
-    VerifyBlockRoot,
+    StateProcessingStrategy, VerifyBlockRoot,
 };
 use std::sync::Arc;
 use std::time::Duration;
@@ -177,6 +177,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
         &mut state,
         &block,
         BlockSignatureStrategy::NoVerification,
+        StateProcessingStrategy::Accurate,
         VerifyBlockRoot::True,
         &mut ctxt,
         spec,
@@ -193,13 +193,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             oldest_block_parent: expected_block_root,
             ..anchor_info
         };
-        let backfill_complete = new_anchor.block_backfill_complete();
+        let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot);
         self.store
             .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?;

         // If backfill has completed and the chain is configured to reconstruct historic states,
         // send a message to the background migrator instructing it to begin reconstruction.
-        if backfill_complete && self.config.reconstruct_historic_states {
+        // This can only happen if we have backfilled all the way to genesis.
+        if backfill_complete
+            && self.genesis_backfill_slot == Slot::new(0)
+            && self.config.reconstruct_historic_states
+        {
             self.store_migrator.process_reconstruction();
         }
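Backfill completion is now judged against the configured target slot, while state reconstruction additionally requires that target to be genesis. A small sketch of that two-level check (the field and method names are simplified stand-ins for `AnchorInfo`):

/// Backfill is "complete" once the oldest stored block reaches the
/// configured target slot; states can only be reconstructed when that
/// target is genesis (slot 0).
struct AnchorInfo {
    oldest_block_slot: u64,
}

impl AnchorInfo {
    fn block_backfill_complete(&self, target_slot: u64) -> bool {
        self.oldest_block_slot <= target_slot
    }
}

fn main() {
    let genesis_backfill_slot = 5_347_328;
    let anchor = AnchorInfo { oldest_block_slot: 5_347_328 };
    // Complete relative to a truncated backfill target...
    assert!(anchor.block_backfill_complete(genesis_backfill_slot));
    // ...but reconstruction additionally demands a genesis anchor.
    let can_reconstruct =
        anchor.block_backfill_complete(genesis_backfill_slot) && genesis_backfill_slot == 0;
    assert!(!can_reconstruct);
}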
@@ -876,6 +876,14 @@ lazy_static! {
         "beacon_sync_committee_message_gossip_verification_seconds",
         "Full runtime of sync contribution gossip verification"
     );
+    pub static ref SYNC_MESSAGE_EQUIVOCATIONS: Result<IntCounter> = try_create_int_counter(
+        "sync_message_equivocations_total",
+        "Number of sync messages with the same validator index for different blocks"
+    );
+    pub static ref SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD: Result<IntCounter> = try_create_int_counter(
+        "sync_message_equivocations_to_head_total",
+        "Number of sync message which conflict with a previous message but elect the head"
+    );

     /*
      * Sync Committee Contribution Verification
@@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet};
 use std::hash::Hash;
 use std::marker::PhantomData;
 use types::slot_data::SlotData;
-use types::{Epoch, EthSpec, Slot, Unsigned};
+use types::{Epoch, EthSpec, Hash256, Slot, Unsigned};

 /// The maximum capacity of the `AutoPruningEpochContainer`.
 ///
@@ -39,10 +39,10 @@ pub const MAX_CACHED_EPOCHS: u64 = 3;

 pub type ObservedAttesters<E> = AutoPruningEpochContainer<EpochBitfield, E>;
 pub type ObservedSyncContributors<E> =
-    AutoPruningSlotContainer<SlotSubcommitteeIndex, SyncContributorSlotHashSet<E>, E>;
+    AutoPruningSlotContainer<SlotSubcommitteeIndex, Hash256, SyncContributorSlotHashSet<E>, E>;
 pub type ObservedAggregators<E> = AutoPruningEpochContainer<EpochHashSet, E>;
 pub type ObservedSyncAggregators<E> =
-    AutoPruningSlotContainer<SlotSubcommitteeIndex, SyncAggregatorSlotHashSet, E>;
+    AutoPruningSlotContainer<SlotSubcommitteeIndex, (), SyncAggregatorSlotHashSet, E>;

 #[derive(Debug, PartialEq)]
 pub enum Error {
@@ -62,7 +62,7 @@ pub enum Error {
 }

 /// Implemented on an item in an `AutoPruningContainer`.
-pub trait Item {
+pub trait Item<T> {
     /// Instantiate `Self` with the given `capacity`.
     fn with_capacity(capacity: usize) -> Self;

@@ -75,11 +75,11 @@ pub trait Item {
     /// Returns the number of validators that have been observed by `self`.
     fn validator_count(&self) -> usize;

-    /// Store `validator_index` in `self`.
-    fn insert(&mut self, validator_index: usize) -> bool;
+    /// Store `validator_index` and `value` in `self`.
+    fn insert(&mut self, validator_index: usize, value: T) -> bool;

-    /// Returns `true` if `validator_index` has been stored in `self`.
-    fn contains(&self, validator_index: usize) -> bool;
+    /// Returns `Some(T)` if there is an entry for `validator_index`.
+    fn get(&self, validator_index: usize) -> Option<T>;
 }
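Parameterising `Item` over a value type lets set-like observers (`T = ()`) and map-like observers (`T = Hash256`) share one container, with `contains` generalised to `get`. A compact, self-contained sketch of the same idea, independent of the Lighthouse types:

use std::collections::HashMap;

/// Value-carrying observation storage; `T = ()` degenerates to a set.
trait Item<T> {
    fn with_capacity(capacity: usize) -> Self;
    /// Returns `true` if the index had already been observed.
    fn insert(&mut self, index: usize, value: T) -> bool;
    fn get(&self, index: usize) -> Option<T>;
}

struct MapItem<T> {
    map: HashMap<usize, T>,
}

impl<T: Copy> Item<T> for MapItem<T> {
    fn with_capacity(capacity: usize) -> Self {
        Self { map: HashMap::with_capacity(capacity) }
    }
    fn insert(&mut self, index: usize, value: T) -> bool {
        self.map.insert(index, value).is_some()
    }
    fn get(&self, index: usize) -> Option<T> {
        self.map.get(&index).copied()
    }
}

fn main() {
    // Map-like: remember which block root each validator voted for.
    let mut contributors = <MapItem<[u8; 32]> as Item<[u8; 32]>>::with_capacity(8);
    assert!(!contributors.insert(0, [0xaa; 32]));
    assert_eq!(contributors.get(0), Some([0xaa; 32]));

    // Set-like: only membership matters.
    let mut aggregators = <MapItem<()> as Item<()>>::with_capacity(8);
    assert!(!aggregators.insert(0, ()));
    assert!(aggregators.insert(0, ())); // second insert reports a repeat
}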
/// Stores a `BitVec` that represents which validator indices have attested or sent sync committee
@@ -88,7 +88,7 @@ pub struct EpochBitfield {
     bitfield: BitVec,
 }

-impl Item for EpochBitfield {
+impl Item<()> for EpochBitfield {
     fn with_capacity(capacity: usize) -> Self {
         Self {
             bitfield: BitVec::with_capacity(capacity),
@@ -108,7 +108,7 @@ impl Item for EpochBitfield {
         self.bitfield.iter().filter(|bit| **bit).count()
     }

-    fn insert(&mut self, validator_index: usize) -> bool {
+    fn insert(&mut self, validator_index: usize, _value: ()) -> bool {
         self.bitfield
             .get_mut(validator_index)
             .map(|mut bit| {
@@ -129,8 +129,11 @@ impl Item for EpochBitfield {
         })
     }

-    fn contains(&self, validator_index: usize) -> bool {
-        self.bitfield.get(validator_index).map_or(false, |bit| *bit)
+    fn get(&self, validator_index: usize) -> Option<()> {
+        self.bitfield
+            .get(validator_index)
+            .map_or(false, |bit| *bit)
+            .then_some(())
     }
 }

@@ -140,7 +143,7 @@ pub struct EpochHashSet {
     set: HashSet<usize>,
 }

-impl Item for EpochHashSet {
+impl Item<()> for EpochHashSet {
     fn with_capacity(capacity: usize) -> Self {
         Self {
             set: HashSet::with_capacity(capacity),
@@ -163,27 +166,27 @@ impl Item for EpochHashSet {

     /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
     /// already in the set.
-    fn insert(&mut self, validator_index: usize) -> bool {
+    fn insert(&mut self, validator_index: usize, _value: ()) -> bool {
         !self.set.insert(validator_index)
     }

     /// Returns `true` if the `validator_index` is in the set.
-    fn contains(&self, validator_index: usize) -> bool {
-        self.set.contains(&validator_index)
+    fn get(&self, validator_index: usize) -> Option<()> {
+        self.set.contains(&validator_index).then_some(())
     }
 }

 /// Stores a `HashSet` of which validator indices have created a sync aggregate during a
 /// slot.
 pub struct SyncContributorSlotHashSet<E> {
-    set: HashSet<usize>,
+    map: HashMap<usize, Hash256>,
     phantom: PhantomData<E>,
 }

-impl<E: EthSpec> Item for SyncContributorSlotHashSet<E> {
+impl<E: EthSpec> Item<Hash256> for SyncContributorSlotHashSet<E> {
     fn with_capacity(capacity: usize) -> Self {
         Self {
-            set: HashSet::with_capacity(capacity),
+            map: HashMap::with_capacity(capacity),
             phantom: PhantomData,
         }
     }
@@ -194,22 +197,24 @@ impl<E: EthSpec> Item for SyncContributorSlotHashSet<E> {
     }

     fn len(&self) -> usize {
-        self.set.len()
+        self.map.len()
     }

     fn validator_count(&self) -> usize {
-        self.set.len()
+        self.map.len()
     }

     /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
     /// already in the set.
-    fn insert(&mut self, validator_index: usize) -> bool {
-        !self.set.insert(validator_index)
+    fn insert(&mut self, validator_index: usize, beacon_block_root: Hash256) -> bool {
+        self.map
+            .insert(validator_index, beacon_block_root)
+            .is_some()
     }

     /// Returns `true` if the `validator_index` is in the set.
-    fn contains(&self, validator_index: usize) -> bool {
-        self.set.contains(&validator_index)
+    fn get(&self, validator_index: usize) -> Option<Hash256> {
+        self.map.get(&validator_index).copied()
     }
 }
@@ -219,7 +224,7 @@ pub struct SyncAggregatorSlotHashSet {
     set: HashSet<usize>,
 }

-impl Item for SyncAggregatorSlotHashSet {
+impl Item<()> for SyncAggregatorSlotHashSet {
     fn with_capacity(capacity: usize) -> Self {
         Self {
             set: HashSet::with_capacity(capacity),
@@ -241,13 +246,13 @@ impl Item for SyncAggregatorSlotHashSet {

     /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was
     /// already in the set.
-    fn insert(&mut self, validator_index: usize) -> bool {
+    fn insert(&mut self, validator_index: usize, _value: ()) -> bool {
         !self.set.insert(validator_index)
     }

     /// Returns `true` if the `validator_index` is in the set.
-    fn contains(&self, validator_index: usize) -> bool {
-        self.set.contains(&validator_index)
+    fn get(&self, validator_index: usize) -> Option<()> {
+        self.set.contains(&validator_index).then_some(())
     }
 }

@@ -275,7 +280,7 @@ impl<T, E: EthSpec> Default for AutoPruningEpochContainer<T, E> {
     }
 }

-impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
+impl<T: Item<()>, E: EthSpec> AutoPruningEpochContainer<T, E> {
     /// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has
     /// previously been observed for `validator_index`.
     ///
@@ -293,7 +298,7 @@ impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
         self.prune(epoch);

         if let Some(item) = self.items.get_mut(&epoch) {
-            Ok(item.insert(validator_index))
+            Ok(item.insert(validator_index, ()))
         } else {
             // To avoid re-allocations, try and determine a rough initial capacity for the new item
             // by obtaining the mean size of all items in earlier epoch.
@@ -309,7 +314,7 @@ impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
             let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);

             let mut item = T::with_capacity(initial_capacity);
-            item.insert(validator_index);
+            item.insert(validator_index, ());
             self.items.insert(epoch, item);

             Ok(false)
@@ -333,7 +338,7 @@ impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
         let exists = self
             .items
             .get(&epoch)
-            .map_or(false, |item| item.contains(validator_index));
+            .map_or(false, |item| item.get(validator_index).is_some());

         Ok(exists)
     }
@@ -392,7 +397,7 @@ impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
     pub fn index_seen_at_epoch(&self, index: usize, epoch: Epoch) -> bool {
         self.items
             .get(&epoch)
-            .map(|item| item.contains(index))
+            .map(|item| item.get(index).is_some())
             .unwrap_or(false)
     }
 }
@@ -405,23 +410,63 @@ impl<T: Item, E: EthSpec> AutoPruningEpochContainer<T, E> {
 /// sync contributions with an epoch prior to `data.slot - 3` will be cleared from the cache.
 ///
 /// `V` should be set to a `SyncAggregatorSlotHashSet` or a `SyncContributorSlotHashSet`.
-pub struct AutoPruningSlotContainer<K: SlotData + Eq + Hash, V, E: EthSpec> {
+pub struct AutoPruningSlotContainer<K: SlotData + Eq + Hash, S, V, E: EthSpec> {
     lowest_permissible_slot: Slot,
     items: HashMap<K, V>,
-    _phantom: PhantomData<E>,
+    _phantom_e: PhantomData<E>,
+    _phantom_s: PhantomData<S>,
 }

-impl<K: SlotData + Eq + Hash, V, E: EthSpec> Default for AutoPruningSlotContainer<K, V, E> {
+impl<K: SlotData + Eq + Hash, S, V, E: EthSpec> Default for AutoPruningSlotContainer<K, S, V, E> {
     fn default() -> Self {
         Self {
             lowest_permissible_slot: Slot::new(0),
             items: HashMap::new(),
-            _phantom: PhantomData,
+            _phantom_e: PhantomData,
+            _phantom_s: PhantomData,
         }
     }
 }

-impl<K: SlotData + Eq + Hash, V: Item, E: EthSpec> AutoPruningSlotContainer<K, V, E> {
+impl<K: SlotData + Eq + Hash + Copy, S, V: Item<S>, E: EthSpec>
+    AutoPruningSlotContainer<K, S, V, E>
+{
+    /// Observes the given `value` for the given `validator_index`.
+    ///
+    /// The `override_observation` function is supplied `previous_observation`
+    /// and `value`. If it returns `true`, then any existing observation will be
+    /// overridden.
+    ///
+    /// This function returns `Some` if:
+    /// - An observation already existed for the validator, AND,
+    /// - The `override_observation` function returned `false`.
+    ///
+    /// Alternatively, it returns `None` if:
+    /// - An observation did not already exist for the given validator, OR,
+    /// - The `override_observation` function returned `true`.
+    pub fn observe_validator_with_override<F>(
+        &mut self,
+        key: K,
+        validator_index: usize,
+        value: S,
+        override_observation: F,
+    ) -> Result<Option<S>, Error>
+    where
+        F: Fn(&S, &S) -> bool,
+    {
+        if let Some(prev_observation) = self.observation_for_validator(key, validator_index)? {
+            if override_observation(&prev_observation, &value) {
+                self.observe_validator(key, validator_index, value)?;
+                Ok(None)
+            } else {
+                Ok(Some(prev_observation))
+            }
+        } else {
+            self.observe_validator(key, validator_index, value)?;
+            Ok(None)
+        }
+    }
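`observe_validator_with_override` is what allows a second, conflicting sync message to be accepted when the caller's predicate elects to replace the earlier observation (for example, when the new message votes for the head, as counted by `SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD` earlier). A toy usage sketch with a plain map standing in for the container:

use std::collections::HashMap;

/// Toy stand-in for `AutoPruningSlotContainer`, keyed by validator index,
/// storing the block root each validator was last seen voting for.
struct Observations {
    map: HashMap<usize, [u8; 32]>,
}

impl Observations {
    /// Mirrors `observe_validator_with_override`: keep the old observation
    /// unless the caller's predicate elects to replace it.
    fn observe_with_override<F>(
        &mut self,
        validator: usize,
        value: [u8; 32],
        override_observation: F,
    ) -> Option<[u8; 32]>
    where
        F: Fn(&[u8; 32], &[u8; 32]) -> bool,
    {
        match self.map.get(&validator).copied() {
            Some(prev) if !override_observation(&prev, &value) => Some(prev),
            _ => {
                self.map.insert(validator, value);
                None
            }
        }
    }
}

fn main() {
    let head_root = [1u8; 32];
    let other_root = [2u8; 32];
    let mut obs = Observations { map: HashMap::new() };

    // First message from validator 0: always recorded.
    assert_eq!(obs.observe_with_override(0, other_root, |_, _| false), None);

    // A conflicting message is normally rejected (previous value returned)...
    assert_eq!(
        obs.observe_with_override(0, head_root, |_, _| false),
        Some(other_root)
    );
    // ...but a predicate like "the new message votes for the head" can
    // let it override the earlier observation.
    assert_eq!(
        obs.observe_with_override(0, head_root, |_prev, new| *new == head_root),
        None
    );
    assert_eq!(obs.map[&0], head_root);
}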
     /// Observe that `validator_index` has produced a sync committee message. Returns `Ok(true)` if
     /// the sync committee message has previously been observed for `validator_index`.
     ///
@@ -429,14 +474,19 @@ impl<K: SlotData + Eq + Hash, V: Item, E: EthSpec> AutoPruningSlotContainer<K, V
     ///
     /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
     /// - `key.slot` is earlier than `self.lowest_permissible_slot`.
-    pub fn observe_validator(&mut self, key: K, validator_index: usize) -> Result<bool, Error> {
+    pub fn observe_validator(
+        &mut self,
+        key: K,
+        validator_index: usize,
+        value: S,
+    ) -> Result<bool, Error> {
         let slot = key.get_slot();
         self.sanitize_request(slot, validator_index)?;

         self.prune(slot);

         if let Some(item) = self.items.get_mut(&key) {
-            Ok(item.insert(validator_index))
+            Ok(item.insert(validator_index, value))
         } else {
             // To avoid re-allocations, try and determine a rough initial capacity for the new item
             // by obtaining the mean size of all items in earlier slot.
@@ -452,32 +502,45 @@ impl<K: SlotData + Eq + Hash, V: Item, E: EthSpec> AutoPruningSlotContainer<K, V
             let initial_capacity = sum.checked_div(count).unwrap_or_else(V::default_capacity);

             let mut item = V::with_capacity(initial_capacity);
-            item.insert(validator_index);
+            item.insert(validator_index, value);
             self.items.insert(key, item);

             Ok(false)
         }
     }

     /// Returns `Ok(true)` if the `validator_index` has already produced a conflicting sync committee message.
     ///
     /// ## Errors
     ///
     /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
     /// - `key.slot` is earlier than `self.lowest_permissible_slot`.
+    // Identical to `Self::observation_for_validator` but discards the
+    // observation, simply returning `true` if the validator has been observed
+    // at all.
     pub fn validator_has_been_observed(
         &self,
         key: K,
         validator_index: usize,
     ) -> Result<bool, Error> {
+        self.observation_for_validator(key, validator_index)
+            .map(|observation| observation.is_some())
+    }
+
+    /// Returns `Ok(Some)` if the `validator_index` has already produced a
+    /// conflicting sync committee message.
+    ///
+    /// ## Errors
+    ///
+    /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`.
+    /// - `key.slot` is earlier than `self.lowest_permissible_slot`.
+    pub fn observation_for_validator(
+        &self,
+        key: K,
+        validator_index: usize,
+    ) -> Result<Option<S>, Error> {
         self.sanitize_request(key.get_slot(), validator_index)?;

-        let exists = self
+        let observation = self
             .items
             .get(&key)
-            .map_or(false, |item| item.contains(validator_index));
+            .and_then(|item| item.get(validator_index));

-        Ok(exists)
+        Ok(observation)
     }
     /// Returns the number of validators that have been observed at the given `slot`. Returns
@@ -561,6 +624,116 @@ mod tests {

     type E = types::MainnetEthSpec;

+    #[test]
+    fn value_storage() {
+        type Container = AutoPruningSlotContainer<Slot, Hash256, SyncContributorSlotHashSet<E>, E>;
+
+        let mut store: Container = <_>::default();
+        let key = Slot::new(0);
+        let validator_index = 0;
+        let value = Hash256::zero();
+
+        // Assert there is no entry.
+        assert!(store
+            .observation_for_validator(key, validator_index)
+            .unwrap()
+            .is_none());
+        assert!(!store
+            .validator_has_been_observed(key, validator_index)
+            .unwrap());
+
+        // Add an entry.
+        assert!(!store
+            .observe_validator(key, validator_index, value)
+            .unwrap());
+
+        // Assert there is a correct entry.
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            Some(value)
+        );
+        assert!(store
+            .validator_has_been_observed(key, validator_index)
+            .unwrap());
+
+        let alternate_value = Hash256::from_low_u64_be(1);
+
+        // Assert that override false does not override.
+        assert_eq!(
+            store
+                .observe_validator_with_override(key, validator_index, alternate_value, |_, _| {
+                    false
+                })
+                .unwrap(),
+            Some(value)
+        );
+
+        // Assert that override true overrides and acts as if there was never an
+        // entry there.
+        assert_eq!(
+            store
+                .observe_validator_with_override(key, validator_index, alternate_value, |_, _| {
+                    true
+                })
+                .unwrap(),
+            None
+        );
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            Some(alternate_value)
+        );
+
+        // Reset the store.
+        let mut store: Container = <_>::default();
+
+        // Assert that a new entry with override = false is inserted.
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            None
+        );
+        assert_eq!(
+            store
+                .observe_validator_with_override(key, validator_index, value, |_, _| { false })
+                .unwrap(),
+            None,
+        );
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            Some(value)
+        );
+
+        // Reset the store.
+        let mut store: Container = <_>::default();
+
+        // Assert that a new entry with override = true is inserted.
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            None
+        );
+        assert_eq!(
+            store
+                .observe_validator_with_override(key, validator_index, value, |_, _| { true })
+                .unwrap(),
+            None,
+        );
+        assert_eq!(
+            store
+                .observation_for_validator(key, validator_index)
+                .unwrap(),
+            Some(value)
+        );
+    }
+
     macro_rules! test_suite_epoch {
         ($mod_name: ident, $type: ident) => {
             #[cfg(test)]
@@ -722,7 +895,7 @@ mod tests {
     test_suite_epoch!(observed_aggregators, ObservedAggregators);

     macro_rules! test_suite_slot {
-        ($mod_name: ident, $type: ident) => {
+        ($mod_name: ident, $type: ident, $value: expr) => {
             #[cfg(test)]
             mod $mod_name {
                 use super::*;
@@ -737,7 +910,7 @@ mod tests {
                     "should indicate an unknown item is unknown"
                 );
                 assert_eq!(
-                    store.observe_validator(key, i),
+                    store.observe_validator(key, i, $value),
                     Ok(false),
                     "should observe new item"
                 );
@@ -750,7 +923,7 @@ mod tests {
                     "should indicate a known item is known"
                 );
                 assert_eq!(
-                    store.observe_validator(key, i),
+                    store.observe_validator(key, i, $value),
                     Ok(true),
                     "should acknowledge an existing item"
                 );
@@ -997,6 +1170,10 @@ mod tests {
             }
         };
     }
-    test_suite_slot!(observed_sync_contributors, ObservedSyncContributors);
-    test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators);
+    test_suite_slot!(
+        observed_sync_contributors,
+        ObservedSyncContributors,
+        Hash256::zero()
+    );
+    test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators, ());
 }
@@ -1,17 +1,41 @@
-use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11;
+use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV11, PersistedForkChoiceStoreV17};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use store::{DBColumn, Error, StoreItem};
 use superstruct::superstruct;

 // If adding a new version you should update this type alias and fix the breakages.
-pub type PersistedForkChoice = PersistedForkChoiceV11;
+pub type PersistedForkChoice = PersistedForkChoiceV17;

-#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
+#[superstruct(
+    variants(V11, V17),
+    variant_attributes(derive(Encode, Decode)),
+    no_enum
+)]
 pub struct PersistedForkChoice {
     pub fork_choice: fork_choice::PersistedForkChoice,
+    #[superstruct(only(V11))]
     pub fork_choice_store: PersistedForkChoiceStoreV11,
+    #[superstruct(only(V17))]
+    pub fork_choice_store: PersistedForkChoiceStoreV17,
 }

+impl Into<PersistedForkChoice> for PersistedForkChoiceV11 {
+    fn into(self) -> PersistedForkChoice {
+        PersistedForkChoice {
+            fork_choice: self.fork_choice,
+            fork_choice_store: self.fork_choice_store.into(),
+        }
+    }
+}
+
+impl Into<PersistedForkChoiceV11> for PersistedForkChoice {
+    fn into(self) -> PersistedForkChoiceV11 {
+        PersistedForkChoiceV11 {
+            fork_choice: self.fork_choice,
+            fork_choice_store: self.fork_choice_store.into(),
+        }
+    }
+}

 macro_rules! impl_store_item {
@@ -33,3 +57,4 @@ macro_rules! impl_store_item {
 }

 impl_store_item!(PersistedForkChoiceV11);
+impl_store_item!(PersistedForkChoiceV17);
@@ -4,6 +4,7 @@ mod migration_schema_v13;
 mod migration_schema_v14;
 mod migration_schema_v15;
 mod migration_schema_v16;
+mod migration_schema_v17;

 use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
 use crate::eth1_chain::SszEth1;
@@ -141,6 +142,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(
             let ops = migration_schema_v16::downgrade_from_v16::<T>(db.clone(), log)?;
             db.store_schema_version_atomically(to, ops)
         }
+        (SchemaVersion(16), SchemaVersion(17)) => {
+            let ops = migration_schema_v17::upgrade_to_v17::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
+        (SchemaVersion(17), SchemaVersion(16)) => {
+            let ops = migration_schema_v17::downgrade_from_v17::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
         // Anything else is an error.
         (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
             target_version: to,
@@ -0,0 +1,88 @@ (new file)
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
use crate::persisted_fork_choice::{PersistedForkChoiceV11, PersistedForkChoiceV17};
use proto_array::core::{SszContainerV16, SszContainerV17};
use slog::{debug, Logger};
use ssz::{Decode, Encode};
use std::sync::Arc;
use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};

pub fn upgrade_fork_choice(
    mut fork_choice: PersistedForkChoiceV11,
) -> Result<PersistedForkChoiceV17, Error> {
    let ssz_container_v16 =
        SszContainerV16::from_ssz_bytes(&fork_choice.fork_choice.proto_array_bytes).map_err(
            |e| {
                Error::SchemaMigrationError(format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                ))
            },
        )?;

    let ssz_container_v17: SszContainerV17 = ssz_container_v16.try_into().map_err(|e| {
        Error::SchemaMigrationError(format!(
            "Missing checkpoint during schema migration: {:?}",
            e
        ))
    })?;
    fork_choice.fork_choice.proto_array_bytes = ssz_container_v17.as_ssz_bytes();

    Ok(fork_choice.into())
}

pub fn downgrade_fork_choice(
    mut fork_choice: PersistedForkChoiceV17,
) -> Result<PersistedForkChoiceV11, Error> {
    let ssz_container_v17 =
        SszContainerV17::from_ssz_bytes(&fork_choice.fork_choice.proto_array_bytes).map_err(
            |e| {
                Error::SchemaMigrationError(format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                ))
            },
        )?;

    let ssz_container_v16: SszContainerV16 = ssz_container_v17.into();
    fork_choice.fork_choice.proto_array_bytes = ssz_container_v16.as_ssz_bytes();

    Ok(fork_choice.into())
}

pub fn upgrade_to_v17<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
    // Get persisted_fork_choice.
    let v11 = db
        .get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?
        .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?;

    let v17 = upgrade_fork_choice(v11)?;

    debug!(
        log,
        "Removing unused best_justified_checkpoint from fork choice store."
    );

    Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)])
}

pub fn downgrade_from_v17<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
    // Get persisted_fork_choice.
    let v17 = db
        .get_item::<PersistedForkChoiceV17>(&FORK_CHOICE_DB_KEY)?
        .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?;

    let v11 = downgrade_fork_choice(v17)?;

    debug!(
        log,
        "Adding junk best_justified_checkpoint to fork choice store."
    );

    Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)])
}
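`migrate_schema` (two sections up) implements only adjacent version pairs. A plausible reading of the overall design, sketched here with versions as plain integers, is that longer jumps compose single steps; the recursive composition is an assumption of this sketch, not shown in the hunks above:

/// Sketch of stepwise schema migration: only adjacent version pairs are
/// implemented directly; other jumps recurse through intermediate versions.
fn migrate(from: u64, to: u64) -> Result<Vec<String>, String> {
    match (from, to) {
        (x, y) if x == y => Ok(vec![]),
        // Multi-step jumps: migrate one version at a time.
        (x, y) if x + 1 < y => {
            let mut ops = migrate(x, x + 1)?;
            ops.extend(migrate(x + 1, y)?);
            Ok(ops)
        }
        (x, y) if y + 1 < x => {
            let mut ops = migrate(x, x - 1)?;
            ops.extend(migrate(x - 1, y)?);
            Ok(ops)
        }
        // Single-step migrations implemented for this schema.
        (16, 17) => Ok(vec!["upgrade_to_v17".into()]),
        (17, 16) => Ok(vec!["downgrade_from_v17".into()]),
        (x, y) => Err(format!("unsupported migration {x} -> {y}")),
    }
}

fn main() {
    assert_eq!(migrate(16, 17).unwrap(), vec!["upgrade_to_v17".to_string()]);
    assert!(migrate(3, 4).is_err());
}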
@@ -1,10 +1,18 @@
-use crate::{metrics, BeaconChainError};
-use lru::LruCache;
-use oneshot_broadcast::{oneshot, Receiver, Sender};
+use std::collections::HashMap;
 use std::sync::Arc;
-use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256};

-/// The size of the LRU cache that stores committee caches for quicker verification.
+use itertools::Itertools;
+use slog::{debug, Logger};
+
+use oneshot_broadcast::{oneshot, Receiver, Sender};
+use types::{
+    beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256,
+    RelativeEpoch,
+};
+
+use crate::{metrics, BeaconChainError};
+
+/// The size of the cache that stores committee caches for quicker verification.
 ///
 /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash +
 /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this
@@ -45,18 +53,24 @@ impl CacheItem {
     }
 }

-/// Provides an LRU cache for `CommitteeCache`.
+/// Provides a cache for `CommitteeCache`.
 ///
 /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like
 /// a find/replace error.
 pub struct ShufflingCache {
-    cache: LruCache<AttestationShufflingId, CacheItem>,
+    cache: HashMap<AttestationShufflingId, CacheItem>,
+    cache_size: usize,
+    head_shuffling_ids: BlockShufflingIds,
+    logger: Logger,
 }

 impl ShufflingCache {
-    pub fn new(cache_size: usize) -> Self {
+    pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self {
         Self {
-            cache: LruCache::new(cache_size),
+            cache: HashMap::new(),
+            cache_size,
+            head_shuffling_ids,
+            logger,
         }
     }
@ -76,7 +90,7 @@ impl ShufflingCache {
|
||||
metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS);
|
||||
metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS);
|
||||
let ready = CacheItem::Committee(committee);
|
||||
self.cache.put(key.clone(), ready.clone());
|
||||
self.insert_cache_item(key.clone(), ready.clone());
|
||||
Some(ready)
|
||||
}
|
||||
// The promise has not yet been resolved. Return the promise so the caller can await
|
||||
@ -93,13 +107,12 @@ impl ShufflingCache {
            // It's worth noting that this is the only place where we remove unresolved
            // promises from the cache. This means unresolved promises will only be removed if
            // we try to access them again. This is OK, since the promises don't consume much
            // memory and the nature of the LRU cache means that future, relevant entries will
            // still be added to the cache. We expect that *all* promises should be resolved,
            // unless there is a programming or database error.
            // memory. We expect that *all* promises should be resolved, unless there is a
            // programming or database error.
            Err(oneshot_broadcast::Error::SenderDropped) => {
                metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_FAILS);
                metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES);
                self.cache.pop(key);
                self.cache.remove(key);
                None
            }
        },
@ -112,13 +125,13 @@ impl ShufflingCache {
    }

    pub fn contains(&self, key: &AttestationShufflingId) -> bool {
        self.cache.contains(key)
        self.cache.contains_key(key)
    }

    pub fn insert_committee_cache<T: ToArcCommitteeCache>(
    pub fn insert_committee_cache<C: ToArcCommitteeCache>(
        &mut self,
        key: AttestationShufflingId,
        committee_cache: &T,
        committee_cache: &C,
    ) {
        if self
            .cache
@ -127,13 +140,55 @@ impl ShufflingCache {
            // worth two in the promise-bush!
            .map_or(true, CacheItem::is_promise)
        {
            self.cache.put(
            self.insert_cache_item(
                key,
                CacheItem::Committee(committee_cache.to_arc_committee_cache()),
            );
        }
    }

    /// Prunes the cache first before inserting a new cache item.
    fn insert_cache_item(&mut self, key: AttestationShufflingId, cache_item: CacheItem) {
        self.prune_cache();
        self.cache.insert(key, cache_item);
    }

    /// Prunes the `cache` to keep the size below the `cache_size` limit, based on the following
    /// preferences:
    /// - Entries from more recent epochs are preferred over older ones.
    /// - Entries with shuffling ids matching the head's previous, current, and future epochs must
    ///   not be pruned.
    fn prune_cache(&mut self) {
        let target_cache_size = self.cache_size.saturating_sub(1);
        if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) {
            let shuffling_ids_to_prune = self
                .cache
                .keys()
                .sorted_by_key(|key| key.shuffling_epoch)
                .filter(|shuffling_id| {
                    Some(shuffling_id)
                        != self
                            .head_shuffling_ids
                            .id_for_epoch(shuffling_id.shuffling_epoch)
                            .as_ref()
                            .as_ref()
                })
                .take(prune_count)
                .cloned()
                .collect::<Vec<_>>();

            for shuffling_id in shuffling_ids_to_prune.iter() {
                debug!(
                    self.logger,
                    "Removing old shuffling from cache";
                    "shuffling_epoch" => shuffling_id.shuffling_epoch,
                    "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block
                );
                self.cache.remove(shuffling_id);
            }
        }
    }
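
A minimal sketch (not part of the diff) of how this pruning preference plays out, reusing the `shuffling_id` and `null_logger` helpers from the test module further down; the cache size and epoch values are arbitrary:

    let committee_cache = Arc::new(CommitteeCache::default());
    // A cache of size 2 whose protected head ids cover epochs 7..=9.
    let head_ids = BlockShufflingIds {
        current: shuffling_id(8),
        next: shuffling_id(9),
        previous: Some(shuffling_id(7)),
        block_root: Hash256::zero(),
    };
    let mut cache = ShufflingCache::new(2, head_ids, null_logger().unwrap());
    cache.insert_committee_cache(shuffling_id(1), &committee_cache);
    cache.insert_committee_cache(shuffling_id(8), &committee_cache);
    // This third insert triggers a prune: epoch 1 is the lowest non-head epoch, so it goes.
    cache.insert_committee_cache(shuffling_id(2), &committee_cache);
    assert!(cache.contains(&shuffling_id(8)), "head epochs are never pruned");
    assert!(!cache.contains(&shuffling_id(1)), "oldest non-head epoch is pruned first");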

    pub fn create_promise(
        &mut self,
        key: AttestationShufflingId,
@ -148,9 +203,17 @@ impl ShufflingCache {
        }

        let (sender, receiver) = oneshot();
        self.cache.put(key, CacheItem::Promise(receiver));
        self.insert_cache_item(key, CacheItem::Promise(receiver));
        Ok(sender)
    }

    /// Inform the cache that the shuffling decision roots for the head have changed.
    ///
    /// The shufflings for the head's previous, current, and future epochs will never be ejected from
    /// the cache during `Self::insert_cache_item`.
    pub fn update_head_shuffling_ids(&mut self, head_shuffling_ids: BlockShufflingIds) {
        self.head_shuffling_ids = head_shuffling_ids;
    }
}

/// A helper trait to allow lazy-cloning of the committee cache when inserting into the cache.
@ -170,26 +233,29 @@ impl ToArcCommitteeCache for Arc<CommitteeCache> {
    }
}

impl Default for ShufflingCache {
    fn default() -> Self {
        Self::new(DEFAULT_CACHE_SIZE)
    }
}

/// Contains the shuffling IDs for a beacon block.
#[derive(Clone)]
pub struct BlockShufflingIds {
    pub current: AttestationShufflingId,
    pub next: AttestationShufflingId,
    pub previous: Option<AttestationShufflingId>,
    pub block_root: Hash256,
}

impl BlockShufflingIds {
    /// Returns the shuffling ID for the given epoch.
    ///
    /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`.
    /// Returns `None` if `epoch` is prior to `self.previous?.shuffling_epoch` or
    /// `self.current.shuffling_epoch` (if `previous` is `None`).
    pub fn id_for_epoch(&self, epoch: Epoch) -> Option<AttestationShufflingId> {
        if epoch == self.current.shuffling_epoch {
            Some(self.current.clone())
        } else if self
            .previous
            .as_ref()
            .map_or(false, |id| id.shuffling_epoch == epoch)
        {
            self.previous.clone()
        } else if epoch == self.next.shuffling_epoch {
            Some(self.next.clone())
        } else if epoch > self.next.shuffling_epoch {
@ -201,18 +267,57 @@ impl BlockShufflingIds {
            None
        }
    }
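
A minimal sketch of the selection order above (illustrative only; `shuffling_id` is the deterministic helper from the test module below, and the epoch values are arbitrary):

    let ids = BlockShufflingIds {
        current: shuffling_id(8),
        next: shuffling_id(9),
        previous: Some(shuffling_id(7)),
        block_root: Hash256::zero(),
    };
    assert!(ids.id_for_epoch(Epoch::new(8)) == Some(shuffling_id(8))); // current epoch
    assert!(ids.id_for_epoch(Epoch::new(7)) == Some(shuffling_id(7))); // previous epoch
    assert!(ids.id_for_epoch(Epoch::new(9)) == Some(shuffling_id(9))); // next epoch
    assert!(ids.id_for_epoch(Epoch::new(6)).is_none()); // prior to `previous`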

    pub fn try_from_head<T: EthSpec>(
        head_block_root: Hash256,
        head_state: &BeaconState<T>,
    ) -> Result<Self, String> {
        let get_shuffling_id = |relative_epoch| {
            AttestationShufflingId::new(head_block_root, head_state, relative_epoch).map_err(|e| {
                format!(
                    "Unable to get attester shuffling decision slot for the epoch {:?}: {:?}",
                    relative_epoch, e
                )
            })
        };

        Ok(Self {
            current: get_shuffling_id(RelativeEpoch::Current)?,
            next: get_shuffling_id(RelativeEpoch::Next)?,
            previous: Some(get_shuffling_id(RelativeEpoch::Previous)?),
            block_root: head_block_root,
        })
    }
}
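
A hedged sketch of how the new three-argument constructor is presumably wired up at a call site (the `head_block_root`, `head_state`, `cache_size`, `logger`, `new_head_root`, and `new_head_state` bindings are assumptions, not code from this diff):

    let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?;
    let mut shuffling_cache = ShufflingCache::new(cache_size, head_shuffling_ids, logger);
    // After a head change, refresh the protected ids so pruning keeps tracking the head.
    shuffling_cache.update_head_shuffling_ids(BlockShufflingIds::try_from_head(
        new_head_root,
        &new_head_state,
    )?);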

// Disable tests in debug since the beacon chain harness is slow unless in release.
#[cfg(not(debug_assertions))]
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::EphemeralHarnessType;
    use task_executor::test_utils::null_logger;
    use types::*;

    type BeaconChainHarness =
        crate::test_utils::BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>>;
    use crate::test_utils::EphemeralHarnessType;

    use super::*;

    type E = MinimalEthSpec;
    type TestBeaconChainType = EphemeralHarnessType<E>;
    type BeaconChainHarness = crate::test_utils::BeaconChainHarness<TestBeaconChainType>;
    const TEST_CACHE_SIZE: usize = 5;

    // Creates a new shuffling cache for testing
    fn new_shuffling_cache() -> ShufflingCache {
        let current_epoch = 8;
        let head_shuffling_ids = BlockShufflingIds {
            current: shuffling_id(current_epoch),
            next: shuffling_id(current_epoch + 1),
            previous: Some(shuffling_id(current_epoch - 1)),
            block_root: Hash256::from_low_u64_le(0),
        };
        let logger = null_logger().unwrap();
        ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger)
    }

    /// Returns two different committee caches for testing.
    fn committee_caches() -> (Arc<CommitteeCache>, Arc<CommitteeCache>) {
@ -249,7 +354,7 @@ mod test {
    fn resolved_promise() {
        let (committee_a, _) = committee_caches();
        let id_a = shuffling_id(1);
        let mut cache = ShufflingCache::default();
        let mut cache = new_shuffling_cache();

        // Create a promise.
        let sender = cache.create_promise(id_a.clone()).unwrap();
@ -276,7 +381,7 @@ mod test {
    #[test]
    fn unresolved_promise() {
        let id_a = shuffling_id(1);
        let mut cache = ShufflingCache::default();
        let mut cache = new_shuffling_cache();

        // Create a promise.
        let sender = cache.create_promise(id_a.clone()).unwrap();
@ -301,7 +406,7 @@ mod test {
    fn two_promises() {
        let (committee_a, committee_b) = committee_caches();
        let (id_a, id_b) = (shuffling_id(1), shuffling_id(2));
        let mut cache = ShufflingCache::default();
        let mut cache = new_shuffling_cache();

        // Create promise A.
        let sender_a = cache.create_promise(id_a.clone()).unwrap();
@ -355,7 +460,7 @@ mod test {

    #[test]
    fn too_many_promises() {
        let mut cache = ShufflingCache::default();
        let mut cache = new_shuffling_cache();

        for i in 0..MAX_CONCURRENT_PROMISES {
            cache.create_promise(shuffling_id(i as u64)).unwrap();
@ -375,4 +480,105 @@ mod test {
            "the cache should have two entries"
        );
    }

    #[test]
    fn should_insert_committee_cache() {
        let mut cache = new_shuffling_cache();
        let id_a = shuffling_id(1);
        let committee_cache_a = Arc::new(CommitteeCache::default());
        cache.insert_committee_cache(id_a.clone(), &committee_cache_a);
        assert!(
            matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee_cache) if committee_cache == committee_cache_a),
            "should insert committee cache"
        );
    }

    #[test]
    fn should_prune_committee_cache_with_lowest_epoch() {
        let mut cache = new_shuffling_cache();
        let shuffling_id_and_committee_caches = (0..(TEST_CACHE_SIZE + 1))
            .map(|i| (shuffling_id(i as u64), Arc::new(CommitteeCache::default())))
            .collect::<Vec<_>>();

        for (shuffling_id, committee_cache) in shuffling_id_and_committee_caches.iter() {
            cache.insert_committee_cache(shuffling_id.clone(), committee_cache);
        }

        for i in 1..(TEST_CACHE_SIZE + 1) {
            assert!(
                cache.contains(&shuffling_id_and_committee_caches.get(i).unwrap().0),
                "should contain recent epoch shuffling ids"
            );
        }

        assert!(
            !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0),
            "should not contain oldest epoch shuffling id"
        );
        assert_eq!(
            cache.cache.len(),
            cache.cache_size,
            "should limit cache size"
        );
    }

    #[test]
    fn should_retain_head_state_shufflings() {
        let mut cache = new_shuffling_cache();
        let current_epoch = 10;
        let committee_cache = Arc::new(CommitteeCache::default());

        // Insert a few entries for the next epoch with different decision roots.
        for i in 0..TEST_CACHE_SIZE {
            let shuffling_id = AttestationShufflingId {
                shuffling_epoch: (current_epoch + 1).into(),
                shuffling_decision_block: Hash256::from_low_u64_be(current_epoch + i as u64),
            };
            cache.insert_committee_cache(shuffling_id, &committee_cache);
        }

        // Now, update the head shuffling ids
        let head_shuffling_ids = BlockShufflingIds {
            current: shuffling_id(current_epoch),
            next: shuffling_id(current_epoch + 1),
            previous: Some(shuffling_id(current_epoch - 1)),
            block_root: Hash256::from_low_u64_le(42),
        };
        cache.update_head_shuffling_ids(head_shuffling_ids.clone());

        // Insert head state shuffling ids. Should not be overridden by other shuffling ids.
        cache.insert_committee_cache(head_shuffling_ids.current.clone(), &committee_cache);
        cache.insert_committee_cache(head_shuffling_ids.next.clone(), &committee_cache);
        cache.insert_committee_cache(
            head_shuffling_ids.previous.clone().unwrap(),
            &committee_cache,
        );

        // Insert a few entries for older epochs.
        for i in 0..TEST_CACHE_SIZE {
            let shuffling_id = AttestationShufflingId {
                shuffling_epoch: Epoch::from(i),
                shuffling_decision_block: Hash256::from_low_u64_be(i as u64),
            };
            cache.insert_committee_cache(shuffling_id, &committee_cache);
        }

        assert!(
            cache.contains(&head_shuffling_ids.current),
            "should retain head shuffling id for the current epoch."
        );
        assert!(
            cache.contains(&head_shuffling_ids.next),
            "should retain head shuffling id for the next epoch."
        );
        assert!(
            cache.contains(&head_shuffling_ids.previous.unwrap()),
            "should retain head shuffling id for previous epoch."
        );
        assert_eq!(
            cache.cache.len(),
            cache.cache_size,
            "should limit cache size"
        );
    }
}
@ -153,7 +153,21 @@ pub enum Error {
    /// It's unclear if this sync message is valid, however we have already observed a
    /// signature from this validator for this slot and should not observe
    /// another.
    PriorSyncCommitteeMessageKnown { validator_index: u64, slot: Slot },
    PriorSyncCommitteeMessageKnown {
        validator_index: u64,
        slot: Slot,
        prev_root: Hash256,
        new_root: Hash256,
    },
    /// We have already observed a contribution for the aggregator and refuse to
    /// process another.
    ///
    /// ## Peer scoring
    ///
    /// It's unclear if this sync message is valid, however we have already observed a
    /// signature from this validator for this slot and should not observe
    /// another.
    PriorSyncContributionMessageKnown { validator_index: u64, slot: Slot },
    /// The sync committee message was received on an invalid sync committee message subnet.
    ///
    /// ## Peer scoring
@ -378,10 +392,10 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> {
        if chain
            .observed_sync_aggregators
            .write()
            .observe_validator(observed_key, aggregator_index as usize)
            .observe_validator(observed_key, aggregator_index as usize, ())
            .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorSyncCommitteeMessageKnown {
            return Err(Error::PriorSyncContributionMessageKnown {
                validator_index: aggregator_index,
                slot: contribution.slot,
            });
@ -450,19 +464,40 @@ impl VerifiedSyncCommitteeMessage {
        // The sync committee message is the first valid message received for the participating validator
        // for the slot, sync_message.slot.
        let validator_index = sync_message.validator_index;
        if chain
        let head_root = chain.canonical_head.cached_head().head_block_root();
        let new_root = sync_message.beacon_block_root;
        let should_override_prev = |prev_root: &Hash256, new_root: &Hash256| {
            let roots_differ = new_root != prev_root;
            let new_elects_head = new_root == &head_root;

            if roots_differ {
                // Track sync committee messages that differ from each other.
                metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS);
                if new_elects_head {
                    // Track sync committee messages that swap from an old block to a new block.
                    metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD);
                }
            }

            roots_differ && new_elects_head
        };
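
The override rule in isolation, as a minimal standalone sketch (illustrative values; the real closure above also bumps the equivocation metrics):

    fn overrides(prev_root: &Hash256, new_root: &Hash256, head_root: &Hash256) -> bool {
        // A later message replaces the recorded one only if it names a *different* root
        // *and* that root is the current head.
        new_root != prev_root && new_root == head_root
    }

    let head = Hash256::from_low_u64_be(1);
    let parent = Hash256::from_low_u64_be(2);
    assert!(overrides(&parent, &head, &head)); // parent -> head: override allowed
    assert!(!overrides(&head, &parent, &head)); // head -> non-head: rejected
    assert!(!overrides(&head, &head, &head)); // exact duplicate: rejected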
        if let Some(prev_root) = chain
            .observed_sync_contributors
            .read()
            .validator_has_been_observed(
            .observation_for_validator(
                SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()),
                validator_index as usize,
            )
            .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorSyncCommitteeMessageKnown {
                validator_index,
                slot: sync_message.slot,
            });
            if !should_override_prev(&prev_root, &new_root) {
                return Err(Error::PriorSyncCommitteeMessageKnown {
                    validator_index,
                    slot: sync_message.slot,
                    prev_root,
                    new_root,
                });
            }
        }

        // The aggregate signature of the sync committee message is valid.
@ -474,18 +509,22 @@ impl VerifiedSyncCommitteeMessage {
        // It's important to double check that the sync committee message still hasn't been observed, since
        // there can be a race-condition if we receive two sync committee messages at the same time and
        // process them in different threads.
        if chain
        if let Some(prev_root) = chain
            .observed_sync_contributors
            .write()
            .observe_validator(
            .observe_validator_with_override(
                SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()),
                validator_index as usize,
                sync_message.beacon_block_root,
                should_override_prev,
            )
            .map_err(BeaconChainError::from)?
        {
            return Err(Error::PriorSyncCommitteeMessageKnown {
                validator_index,
                slot: sync_message.slot,
                prev_root,
                new_root,
            });
        }

@ -1,4 +1,5 @@
use crate::blob_verification::{AsBlock, BlockWrapper};
use crate::observed_operations::ObservationOutcome;
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
pub use crate::{
    beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
@ -29,6 +30,7 @@ pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32;
use kzg::{Kzg, TrustedSetup};
use merkle_proof::MerkleTree;
use operation_pool::ReceivedPreCapella;
use parking_lot::Mutex;
use parking_lot::RwLockWriteGuard;
use rand::rngs::StdRng;
@ -43,7 +45,7 @@ use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
    state_advance::{complete_state_advance, partial_state_advance},
    StateRootStrategy,
    StateProcessingStrategy,
};
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
@ -67,7 +69,7 @@ const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
//
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use
// a different value.
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::max_value();
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX;

pub type BaseHarnessType<TEthSpec, THotStore, TColdStore> =
    Witness<TestingSlotClock, CachingEth1Backend<TEthSpec>, TEthSpec, THotStore, TColdStore>;
@ -88,7 +90,7 @@ pub type AddBlocksResult<E> = (
    BeaconState<E>,
);

/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks.
/// Indicates how the `BeaconChainHarness` should produce blocks.
#[derive(Clone, Copy, Debug)]
pub enum BlockStrategy {
    /// Produce blocks upon the canonical head (normal case).
@ -104,7 +106,7 @@ pub enum BlockStrategy {
    },
}

/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations.
/// Indicates how the `BeaconChainHarness` should produce attestations.
#[derive(Clone, Debug)]
pub enum AttestationStrategy {
    /// All validators attest to whichever block the `BeaconChainHarness` has produced.
@ -744,7 +746,7 @@ where
    pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option<BeaconState<E>> {
        self.chain
            .store
            .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate)
            .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate)
            .unwrap()
    }

@ -767,6 +769,7 @@ where
        state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
    }

    /// Returns a newly created block, signed by the proposer for the given slot.
    pub async fn make_block(
        &self,
        mut state: BeaconState<E>,
@ -1003,31 +1006,31 @@ where
        head_block_root: SignedBeaconBlockHash,
        attestation_slot: Slot,
    ) -> Vec<CommitteeAttestations<E>> {
        self.make_unaggregated_attestations_with_limit(
        let fork = self
            .spec
            .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));
        self.make_unaggregated_attestations_with_opts(
            attesting_validators,
            state,
            state_root,
            head_block_root,
            attestation_slot,
            None,
            MakeAttestationOptions { limit: None, fork },
        )
        .0
    }

    pub fn make_unaggregated_attestations_with_limit(
    pub fn make_unaggregated_attestations_with_opts(
        &self,
        attesting_validators: &[usize],
        state: &BeaconState<E>,
        state_root: Hash256,
        head_block_root: SignedBeaconBlockHash,
        attestation_slot: Slot,
        limit: Option<usize>,
        opts: MakeAttestationOptions,
    ) -> (Vec<CommitteeAttestations<E>>, Vec<usize>) {
        let MakeAttestationOptions { limit, fork } = opts;
        let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
        let fork = self
            .spec
            .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));

        let attesters = Mutex::new(vec![]);

        let attestations = state
@ -1160,8 +1163,6 @@ where
            .collect()
    }

    /// Deprecated: Use make_unaggregated_attestations() instead.
    ///
    /// A list of attestations for each committee for the given slot.
    ///
    /// The first layer of the Vec is organised per committee. For example, if the return value is
@ -1219,16 +1220,35 @@ where
        slot: Slot,
        limit: Option<usize>,
    ) -> (HarnessAttestations<E>, Vec<usize>) {
        let (unaggregated_attestations, attesters) = self
            .make_unaggregated_attestations_with_limit(
                attesting_validators,
                state,
                state_root,
                block_hash,
                slot,
                limit,
            );
        let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch()));
        self.make_attestations_with_opts(
            attesting_validators,
            state,
            state_root,
            block_hash,
            slot,
            MakeAttestationOptions { limit, fork },
        )
    }

    pub fn make_attestations_with_opts(
        &self,
        attesting_validators: &[usize],
        state: &BeaconState<E>,
        state_root: Hash256,
        block_hash: SignedBeaconBlockHash,
        slot: Slot,
        opts: MakeAttestationOptions,
    ) -> (HarnessAttestations<E>, Vec<usize>) {
        let MakeAttestationOptions { fork, .. } = opts;
        let (unaggregated_attestations, attesters) = self.make_unaggregated_attestations_with_opts(
            attesting_validators,
            state,
            state_root,
            block_hash,
            slot,
            opts,
        );

        let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> =
            unaggregated_attestations
@ -1560,6 +1580,26 @@ where
            .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
    }

    pub fn add_bls_to_execution_change(
        &self,
        validator_index: u64,
        address: Address,
    ) -> Result<(), String> {
        let signed_bls_change = self.make_bls_to_execution_change(validator_index, address);
        if let ObservationOutcome::New(verified_bls_change) = self
            .chain
            .verify_bls_to_execution_change_for_gossip(signed_bls_change)
            .expect("should verify BLS to execution change for gossip")
        {
            self.chain
                .import_bls_to_execution_change(verified_bls_change, ReceivedPreCapella::No)
                .then_some(())
                .ok_or("should import BLS to execution change to the op pool".to_string())
        } else {
            Err("should observe new BLS to execution change".to_string())
        }
    }

    pub fn make_bls_to_execution_change(
        &self,
        validator_index: u64,
@ -2077,9 +2117,6 @@ where
            .collect()
    }

    /// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots()
    /// instead
    ///
    /// Advance the slot of the `BeaconChain`.
    ///
    /// Does not produce blocks or attestations.
@ -2093,18 +2130,6 @@ where
        self.chain.slot_clock.set_current_time(time);
    }

    /// Deprecated: Use make_block() instead
    ///
    /// Returns a newly created block, signed by the proposer for the given slot.
    pub async fn build_block(
        &self,
        state: BeaconState<E>,
        slot: Slot,
        _block_strategy: BlockStrategy,
    ) -> (BlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
        self.make_block(state, slot).await
    }

    /// Uses `Self::extend_chain` to build the chain out to the `target_slot`.
    pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 {
        if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() {
@ -2140,8 +2165,6 @@ where
            .await
    }

    /// Deprecated: Use add_attested_blocks_at_slots() instead
    ///
    /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
    /// last-produced block (the head of the chain).
    ///
@ -2296,6 +2319,13 @@ impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> {
    }
}

pub struct MakeAttestationOptions {
    /// Produce exactly `limit` attestations.
    pub limit: Option<usize>,
    /// Fork to use for signing attestations.
    pub fork: Fork,
}
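
A hedged usage sketch of the new options struct with the `_with_opts` methods above (the `harness`, `state`, `state_root`, `block_root`, and `slot` bindings are assumed to be in scope; `E` is the harness's `EthSpec`):

    let fork = harness.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch()));
    let (attestations, attesters) = harness.make_unaggregated_attestations_with_opts(
        &[0, 1, 2],
        &state,
        state_root,
        block_root,
        slot,
        // `limit: None` reproduces the old `_with_limit(.., None)` behaviour, while `fork`
        // lets a test deliberately sign with the wrong fork, as the fork tests below do.
        MakeAttestationOptions { limit: None, fork },
    );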

pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
    let decorator = TermDecorator::new().build();
    let drain = FullFormat::new(decorator).build().fuse();
@ -199,6 +199,7 @@ pub struct ValidatorMetrics {
    pub attestation_head_misses: u64,
    pub attestation_target_hits: u64,
    pub attestation_target_misses: u64,
    pub latest_attestation_inclusion_distance: u64,
}

impl ValidatorMetrics {
@ -225,6 +226,10 @@ impl ValidatorMetrics {
    pub fn increment_head_misses(&mut self) {
        self.attestation_head_misses += 1;
    }

    pub fn set_latest_inclusion_distance(&mut self, distance: u64) {
        self.latest_attestation_inclusion_distance = distance;
    }
}

/// A validator that is being monitored by the `ValidatorMonitor`.
@ -568,7 +573,6 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                } else {
                    validator_metrics.increment_misses()
                }
                drop(validator_metrics);

                // Indicates if any attestation made it on-chain.
                //
@ -693,8 +697,10 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                        &[id],
                        inclusion_delay as i64,
                    );
                    validator_metrics.set_latest_inclusion_distance(inclusion_delay);
                }
            }
            drop(validator_metrics);

            // Indicates the number of sync committee signatures that made it into
            // a sync aggregate in the current_epoch (state.epoch - 1).
@ -1,5 +1,9 @@
#![cfg(not(debug_assertions))]

use beacon_chain::attestation_verification::{
    batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error,
};
use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME};
use beacon_chain::{
    attestation_verification::Error as AttnError,
    test_utils::{
@ -7,6 +11,7 @@ use beacon_chain::{
    },
    BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped,
};
use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32;
use lazy_static::lazy_static;
use state_processing::{
@ -14,9 +19,9 @@ use state_processing::{
};
use tree_hash::TreeHash;
use types::{
    test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, BeaconStateError,
    BitList, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof,
    SignedAggregateAndProof, Slot, SubnetId, Unsigned,
    test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation,
    BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, ForkName, Hash256, Keypair,
    MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, Unsigned,
};

pub type E = MainnetEthSpec;
@ -25,6 +30,8 @@ pub type E = MainnetEthSpec;
/// have committees where _some_ validators are aggregators but not _all_.
pub const VALIDATOR_COUNT: usize = 256;

pub const CAPELLA_FORK_EPOCH: usize = 1;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
@ -50,6 +57,50 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
    harness
}

/// Returns a beacon chain harness with the Capella fork enabled at epoch 1, where
/// all genesis validators start with BLS withdrawal credentials.
fn get_harness_capella_spec(
    validator_count: usize,
) -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec) {
    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(Epoch::new(0));
    spec.bellatrix_fork_epoch = Some(Epoch::new(0));
    spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH as u64));

    let validator_keypairs = KEYPAIRS[0..validator_count].to_vec();
    let genesis_state = interop_genesis_state(
        &validator_keypairs,
        HARNESS_GENESIS_TIME,
        Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
        None,
        &spec,
    )
    .unwrap();

    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .spec(spec.clone())
        .keypairs(validator_keypairs)
        .withdrawal_keypairs(
            KEYPAIRS[0..validator_count]
                .iter()
                .cloned()
                .map(Some)
                .collect(),
        )
        .genesis_state_ephemeral_store(genesis_state)
        .mock_execution_layer()
        .build();

    harness
        .execution_block_generator()
        .move_to_terminal_block()
        .unwrap();

    harness.advance_slot();

    (harness, spec)
}

/// Returns an attestation that is valid for some slot in the given `chain`.
///
/// Also returns some info about who created it.
@ -998,6 +1049,100 @@ async fn attestation_that_skips_epochs() {
        .expect("should gossip verify attestation that skips slots");
}

/// Ensures that an attestation can be processed when a validator receives proposer reward
/// in an epoch _and_ is scheduled for a withdrawal. This is a regression test for a scenario where
/// inconsistent state lookup could cause withdrawal root mismatch.
#[tokio::test]
async fn attestation_validator_receive_proposer_reward_and_withdrawals() {
    let (harness, _) = get_harness_capella_spec(VALIDATOR_COUNT);

    // Advance to a Capella block. Make sure the blocks have attestations.
    let two_thirds = (VALIDATOR_COUNT / 3) * 2;
    let attesters = (0..two_thirds).collect();
    harness
        .extend_chain(
            // To trigger the bug we need the proposer attestation reward to be signed at a block
            // that isn't the first in the epoch.
            MainnetEthSpec::slots_per_epoch() as usize + 1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(attesters),
        )
        .await;

    // Add BLS change for the block proposer at slot 33. This sets up a withdrawal for the block proposer.
    let proposer_index = harness
        .chain
        .block_at_slot(harness.get_current_slot(), WhenSlotSkipped::None)
        .expect("should not error getting block at slot")
        .expect("should find block at slot")
        .message()
        .proposer_index();
    harness
        .add_bls_to_execution_change(proposer_index, Address::from_low_u64_be(proposer_index))
        .unwrap();

    // Apply two blocks: one to process the BLS change, and another to process the withdrawal.
    harness.advance_slot();
    harness
        .extend_chain(
            2,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(vec![]),
        )
        .await;
    let earlier_slot = harness.get_current_slot();
    let earlier_block = harness
        .chain
        .block_at_slot(earlier_slot, WhenSlotSkipped::None)
        .expect("should not error getting block at slot")
        .expect("should find block at slot");

    // Extend the chain out a few epochs so we have some chain depth to play with.
    harness.advance_slot();
    harness
        .extend_chain(
            MainnetEthSpec::slots_per_epoch() as usize * 2,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(vec![]),
        )
        .await;

    let current_slot = harness.get_current_slot();
    let mut state = harness
        .chain
        .get_state(&earlier_block.state_root(), Some(earlier_slot))
        .expect("should not error getting state")
        .expect("should find state");

    while state.slot() < current_slot {
        per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
    }

    let state_root = state.update_tree_hash_cache().unwrap();

    // Get an attestation pointing to an old block (where we do not have its shuffling cached).
    // Verifying the attestation triggers an inconsistent state replay.
    let remaining_attesters = (two_thirds..VALIDATOR_COUNT).collect();
    let (attestation, subnet_id) = harness
        .get_unaggregated_attestations(
            &AttestationStrategy::SomeValidators(remaining_attesters),
            &state,
            state_root,
            earlier_block.canonical_root(),
            current_slot,
        )
        .first()
        .expect("should have at least one committee")
        .first()
        .cloned()
        .expect("should have at least one attestation in committee");

    harness
        .chain
        .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id))
        .expect("should gossip verify attestation without checking withdrawals root");
}

#[tokio::test]
async fn attestation_to_finalized_block() {
    let harness = get_harness(VALIDATOR_COUNT);
@ -1189,3 +1334,198 @@ async fn verify_attestation_for_gossip_doppelganger_detection() {
        .validator_has_been_observed(epoch, index)
        .expect("should check if gossip aggregator was observed"));
}

#[tokio::test]
async fn attestation_verification_use_head_state_fork() {
    let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT);

    // Advance to last block of the pre-Capella fork epoch. Capella is at slot 32.
    harness
        .extend_chain(
            MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(vec![]),
        )
        .await;

    // Assert our head is a block at slot 31 in the pre-Capella fork epoch.
    let pre_capella_slot = harness.get_current_slot();
    let pre_capella_block = harness
        .chain
        .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev)
        .expect("should not error getting block at slot")
        .expect("should find block at slot");
    assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge);

    // Advance slot clock to Capella fork.
    harness.advance_slot();
    let first_capella_slot = harness.get_current_slot();
    assert_eq!(
        spec.fork_name_at_slot::<E>(first_capella_slot),
        ForkName::Capella
    );

    let (state, state_root) = harness.get_current_state_and_root();

    // Scenario 1: other node signed attestations using the Capella fork epoch.
    {
        let attesters = (0..VALIDATOR_COUNT / 2).collect::<Vec<_>>();
        let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap();
        let committee_attestations = harness
            .make_unaggregated_attestations_with_opts(
                attesters.as_slice(),
                &state,
                state_root,
                pre_capella_block.canonical_root().into(),
                first_capella_slot,
                MakeAttestationOptions {
                    fork: capella_fork,
                    limit: None,
                },
            )
            .0
            .first()
            .cloned()
            .expect("should have at least one committee");
        let attestations_and_subnets = committee_attestations
            .iter()
            .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id)));

        assert!(
            batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain).is_ok(),
            "should accept attestations with `data.slot` >= first capella slot signed using the Capella fork"
        );
    }

    // Scenario 2: other node forgot to update their node and signed attestations using the Bellatrix fork
    {
        let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::<Vec<_>>();
        let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap();
        let committee_attestations = harness
            .make_unaggregated_attestations_with_opts(
                attesters.as_slice(),
                &state,
                state_root,
                pre_capella_block.canonical_root().into(),
                first_capella_slot,
                MakeAttestationOptions {
                    fork: merge_fork,
                    limit: None,
                },
            )
            .0
            .first()
            .cloned()
            .expect("should have at least one committee");
        let attestations_and_subnets = committee_attestations
            .iter()
            .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id)));

        let results =
            batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain)
                .expect("should return attestation results");
        let error = results
            .into_iter()
            .collect::<Result<Vec<_>, _>>()
            .err()
            .expect("should return an error");
        assert!(
            matches!(error, Error::InvalidSignature),
            "should reject attestations with `data.slot` >= first capella slot signed using the pre-Capella fork"
        );
    }
}

#[tokio::test]
async fn aggregated_attestation_verification_use_head_state_fork() {
    let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT);

    // Advance to last block of the pre-Capella fork epoch. Capella is at slot 32.
    harness
        .extend_chain(
            MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(vec![]),
        )
        .await;

    // Assert our head is a block at slot 31 in the pre-Capella fork epoch.
    let pre_capella_slot = harness.get_current_slot();
    let pre_capella_block = harness
        .chain
        .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev)
        .expect("should not error getting block at slot")
        .expect("should find block at slot");
    assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge);

    // Advance slot clock to Capella fork.
    harness.advance_slot();
    let first_capella_slot = harness.get_current_slot();
    assert_eq!(
        spec.fork_name_at_slot::<E>(first_capella_slot),
        ForkName::Capella
    );

    let (state, state_root) = harness.get_current_state_and_root();

    // Scenario 1: other node signed attestations using the Capella fork epoch.
    {
        let attesters = (0..VALIDATOR_COUNT / 2).collect::<Vec<_>>();
        let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap();
        let aggregates = harness
            .make_attestations_with_opts(
                attesters.as_slice(),
                &state,
                state_root,
                pre_capella_block.canonical_root().into(),
                first_capella_slot,
                MakeAttestationOptions {
                    fork: capella_fork,
                    limit: None,
                },
            )
            .0
            .into_iter()
            .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof"))
            .collect::<Vec<_>>();

        assert!(
            batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain).is_ok(),
            "should accept aggregates with `data.slot` >= first capella slot signed using the Capella fork"
        );
    }

    // Scenario 2: other node forgot to update their node and signed attestations using the Bellatrix fork
    {
        let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::<Vec<_>>();
        let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap();
        let aggregates = harness
            .make_attestations_with_opts(
                attesters.as_slice(),
                &state,
                state_root,
                pre_capella_block.canonical_root().into(),
                first_capella_slot,
                MakeAttestationOptions {
                    fork: merge_fork,
                    limit: None,
                },
            )
            .0
            .into_iter()
            .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof"))
            .collect::<Vec<_>>();

        let results = batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain)
            .expect("should return attestation results");
        let error = results
            .into_iter()
            .collect::<Result<Vec<_>, _>>()
            .err()
            .expect("should return an error");
        assert!(
            matches!(error, Error::InvalidSignature),
            "should reject aggregates with `data.slot` >= first capella slot signed using the pre-Capella fork"
        );
    }
}
@ -13,7 +13,8 @@ use slasher::{Config as SlasherConfig, Slasher};
use state_processing::{
    common::get_indexed_attestation,
    per_block_processing::{per_block_processing, BlockSignatureStrategy},
    per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot,
    per_slot_processing, BlockProcessingError, ConsensusContext, StateProcessingStrategy,
    VerifyBlockRoot,
};
use std::marker::PhantomData;
use std::sync::Arc;
@ -1191,6 +1192,7 @@ async fn add_base_block_to_altair_chain() {
        &mut state,
        &base_block,
        BlockSignatureStrategy::NoVerification,
        StateProcessingStrategy::Accurate,
        VerifyBlockRoot::True,
        &mut ctxt,
        &harness.chain.spec,
@ -1329,6 +1331,7 @@ async fn add_altair_block_to_base_chain() {
        &mut state,
        &altair_block,
        BlockSignatureStrategy::NoVerification,
        StateProcessingStrategy::Accurate,
        VerifyBlockRoot::True,
        &mut ctxt,
        &harness.chain.spec,
@ -916,6 +916,9 @@ async fn invalid_after_optimistic_sync() {
            .await,
    );

    // EL status should still be online, no errors.
    assert!(!rig.execution_layer().is_offline_or_erroring().await);

    // Running fork choice is necessary since a block has been invalidated.
    rig.recompute_head().await;

@ -5,12 +5,16 @@ use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, Relativ
use int_to_bytes::int_to_bytes32;
use lazy_static::lazy_static;
use safe_arith::SafeArith;
use state_processing::{
    per_block_processing::{altair::sync_committee::process_sync_aggregate, VerifySignatures},
    state_advance::complete_state_advance,
};
use store::{SignedContributionAndProof, SyncCommitteeMessage};
use tree_hash::TreeHash;
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
use types::{
    AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot,
    SyncSelectionProof, SyncSubnetId, Unsigned,
    SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned,
};

pub type E = MainnetEthSpec;
@ -47,10 +51,29 @@ fn get_valid_sync_committee_message(
    relative_sync_committee: RelativeSyncCommittee,
    message_index: usize,
) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) {
    let head_state = harness.chain.head_beacon_state_cloned();
    let head_block_root = harness.chain.head_snapshot().beacon_block_root;
    get_valid_sync_committee_message_for_block(
        harness,
        slot,
        relative_sync_committee,
        message_index,
        head_block_root,
    )
}

/// Returns a sync message that is valid for some slot in the given `chain`.
///
/// Also returns some info about who created it.
fn get_valid_sync_committee_message_for_block(
    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
    slot: Slot,
    relative_sync_committee: RelativeSyncCommittee,
    message_index: usize,
    block_root: Hash256,
) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) {
    let head_state = harness.chain.head_beacon_state_cloned();
    let (signature, _) = harness
        .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee)
        .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee)
        .get(0)
        .expect("sync messages should exist")
        .get(message_index)
@ -119,7 +142,7 @@ fn get_non_aggregator(
    subcommittee.iter().find_map(|pubkey| {
        let validator_index = harness
            .chain
            .validator_index(&pubkey)
            .validator_index(pubkey)
            .expect("should get validator index")
            .expect("pubkey should exist in beacon chain");

@ -376,7 +399,7 @@ async fn aggregated_gossip_verification() {
        SyncCommitteeError::AggregatorNotInCommittee {
            aggregator_index
        }
        if aggregator_index == valid_aggregate.message.aggregator_index as u64
        if aggregator_index == valid_aggregate.message.aggregator_index
    );

    /*
@ -472,7 +495,7 @@ async fn aggregated_gossip_verification() {

    assert_invalid!(
        "sync contribution created with incorrect sync committee",
        next_valid_contribution.clone(),
        next_valid_contribution,
        SyncCommitteeError::InvalidSignature | SyncCommitteeError::AggregatorNotInCommittee { .. }
    );
}
@ -496,6 +519,30 @@ async fn unaggregated_gossip_verification() {

    let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) =
        get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0);
    let parent_root = harness.chain.head_snapshot().beacon_block.parent_root();
    let (valid_sync_committee_message_to_parent, _, _, _) =
        get_valid_sync_committee_message_for_block(
            &harness,
            current_slot,
            RelativeSyncCommittee::Current,
            0,
            parent_root,
        );

    assert_eq!(
        valid_sync_committee_message.slot, valid_sync_committee_message_to_parent.slot,
        "test pre-condition: same slot"
    );
    assert_eq!(
        valid_sync_committee_message.validator_index,
        valid_sync_committee_message_to_parent.validator_index,
        "test pre-condition: same validator index"
    );
    assert!(
        valid_sync_committee_message.beacon_block_root
            != valid_sync_committee_message_to_parent.beacon_block_root,
        "test pre-condition: differing roots"
    );

    macro_rules! assert_invalid {
        ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => {
@ -602,28 +649,130 @@ async fn unaggregated_gossip_verification() {
        SyncCommitteeError::InvalidSignature
    );

    let head_root = valid_sync_committee_message.beacon_block_root;
    let parent_root = valid_sync_committee_message_to_parent.beacon_block_root;

    let verified_message_to_parent = harness
        .chain
        .verify_sync_committee_message_for_gossip(
            valid_sync_committee_message_to_parent.clone(),
            subnet_id,
        )
        .expect("valid sync message to parent should be verified");
    // Add the aggregate to the pool.
    harness
        .chain
        .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id)
        .expect("valid sync message should be verified");
        .add_to_naive_sync_aggregation_pool(verified_message_to_parent)
        .unwrap();

    /*
     * The following test ensures that:
     *
     * There has been no other valid sync committee message for the declared slot for the
     * validator referenced by sync_committee_message.validator_index.
     * A sync committee message from the same validator to the same block will
     * be rejected.
     */
    assert_invalid!(
        "sync message that has already been seen",
        valid_sync_committee_message,
        "sync message to parent block that has already been seen",
        valid_sync_committee_message_to_parent.clone(),
        subnet_id,
        SyncCommitteeError::PriorSyncCommitteeMessageKnown {
            validator_index,
            slot,
            prev_root,
            new_root
        }
        if validator_index == expected_validator_index as u64 && slot == current_slot
        if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == parent_root && new_root == parent_root
    );

    let verified_message_to_head = harness
        .chain
        .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id)
        .expect("valid sync message to the head should be verified");
    // Add the aggregate to the pool.
    harness
        .chain
        .add_to_naive_sync_aggregation_pool(verified_message_to_head)
        .unwrap();

    /*
     * The following test ensures that:
     *
     * A sync committee message from the same validator to the same block will
     * be rejected.
     */
    assert_invalid!(
        "sync message to the head that has already been seen",
        valid_sync_committee_message.clone(),
        subnet_id,
        SyncCommitteeError::PriorSyncCommitteeMessageKnown {
            validator_index,
            slot,
            prev_root,
            new_root
        }
        if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == head_root
    );

    /*
     * The following test ensures that:
     *
     * A sync committee message from the same validator to a non-head block will
     * be rejected.
     */
    assert_invalid!(
        "sync message to parent after message to head has already been seen",
        valid_sync_committee_message_to_parent.clone(),
        subnet_id,
        SyncCommitteeError::PriorSyncCommitteeMessageKnown {
            validator_index,
            slot,
            prev_root,
            new_root
        }
        if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == parent_root
    );

    // Ensure that the sync aggregates in the op pool for both the parent block and head block are valid.
    let chain = &harness.chain;
    let check_sync_aggregate = |root: Hash256| async move {
        // Generate an aggregate sync message from the naive aggregation pool.
        let aggregate = chain
            .get_aggregated_sync_committee_contribution(&SyncContributionData {
                // It's a test pre-condition that both sync messages have the same slot.
                slot: valid_sync_committee_message.slot,
                beacon_block_root: root,
                subcommittee_index: subnet_id.into(),
            })
            .unwrap()
            .unwrap();

        // Insert the aggregate into the op pool.
        chain.op_pool.insert_sync_contribution(aggregate).unwrap();

        // Load the block and state for the given root.
        let block = chain.get_block(&root).await.unwrap().unwrap();
        let mut state = chain.get_state(&block.state_root(), None).unwrap().unwrap();

        // Advance the state to simulate a pre-state for block production.
        let slot = valid_sync_committee_message.slot + 1;
        complete_state_advance(&mut state, Some(block.state_root()), slot, &chain.spec).unwrap();

        // Get an aggregate that would be included in a block.
        let aggregate_for_inclusion = chain.op_pool.get_sync_aggregate(&state).unwrap().unwrap();

        // Validate the retrieved aggregate against the state.
        process_sync_aggregate(
            &mut state,
            &aggregate_for_inclusion,
            0,
            VerifySignatures::True,
            &chain.spec,
        )
        .unwrap();
    };
    check_sync_aggregate(valid_sync_committee_message.beacon_block_root).await;
    check_sync_aggregate(valid_sync_committee_message_to_parent.beacon_block_root).await;

    /*
     * The following test ensures that:
     *
@ -649,7 +798,7 @@ async fn unaggregated_gossip_verification() {

    assert_invalid!(
        "sync message on incorrect subnet",
        next_valid_sync_committee_message.clone(),
        next_valid_sync_committee_message,
        next_subnet_id,
        SyncCommitteeError::InvalidSubnetId {
            received,
@ -6,7 +6,6 @@ edition = "2021"

[dev-dependencies]
serde_yaml = "0.8.13"
logging = { path = "../../common/logging" }
state_processing = { path = "../../consensus/state_processing" }
operation_pool = { path = "../operation_pool" }
tokio = "1.14.0"
@ -17,6 +16,7 @@ store = { path = "../store" }
network = { path = "../network" }
timer = { path = "../timer" }
lighthouse_network = { path = "../lighthouse_network" }
logging = { path = "../../common/logging" }
parking_lot = "0.12.0"
types = { path = "../../consensus/types" }
eth2_config = { path = "../../common/eth2_config" }
@ -259,6 +259,12 @@ where
                genesis_state_bytes,
            } => {
                info!(context.log(), "Starting checkpoint sync");
                if config.chain.genesis_backfill {
                    info!(
                        context.log(),
                        "Blocks will be downloaded all the way back to genesis"
                    );
                }

                let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec)
                    .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?;
@ -280,6 +286,12 @@ where
                    "Starting checkpoint sync";
                    "remote_url" => %url,
                );
                if config.chain.genesis_backfill {
                    info!(
                        context.log(),
                        "Blocks will be downloaded all the way back to genesis"
                    );
                }

                let remote = BeaconNodeHttpClient::new(
                    url,
@ -475,6 +487,7 @@ where
            network_globals: None,
            eth1_service: Some(genesis_service.eth1_service.clone()),
            log: context.log().clone(),
            sse_logging_components: runtime_context.sse_logging_components.clone(),
        });

        // Discard the error from the oneshot.
@ -695,6 +708,7 @@ where
            network_senders: self.network_senders.clone(),
            network_globals: self.network_globals.clone(),
            eth1_service: self.eth1_service.clone(),
            sse_logging_components: runtime_context.sse_logging_components.clone(),
            log: log.clone(),
        });

@ -142,7 +142,8 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
.get_anchor_info()
|
||||
.map(|ai| ai.oldest_block_slot)
|
||||
{
|
||||
sync_distance = current_anchor_slot;
|
||||
sync_distance = current_anchor_slot
|
||||
.saturating_sub(beacon_chain.genesis_backfill_slot);
|
||||
speedo
|
||||
// For backfill sync use a fake slot which is the distance we've progressed from the starting `oldest_block_slot`.
|
||||
.observe(
|
||||
@ -207,14 +208,14 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
|
||||
"Downloading historical blocks";
|
||||
"distance" => distance,
|
||||
"speed" => sync_speed_pretty(speed),
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))),
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))),
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
"Downloading historical blocks";
|
||||
"distance" => distance,
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))),
|
||||
"est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))),
|
||||
);
|
||||
}
|
||||
} else if !is_backfilling && last_backfill_log_slot.is_some() {
|
||||
|
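The `sync_distance` assignment and both `est_time` lines above rebase the backfill estimate onto `beacon_chain.genesis_backfill_slot`. A hedged, self-contained sketch of that arithmetic (plain `u64`s standing in for `Slot`):

```rust
/// Hedged sketch of the backfill distance arithmetic used above: progress is
/// measured down to `genesis_backfill_slot` rather than slot 0, and
/// `saturating_sub` pins the distance at zero once the target is reached.
fn backfill_distance(anchor_slot: u64, genesis_backfill_slot: u64) -> u64 {
    anchor_slot.saturating_sub(genesis_backfill_slot)
}

fn main() {
    // 500 slots remain when the anchor is at 1500 and backfill stops at 1000.
    assert_eq!(backfill_distance(1500, 1000), 500);
    // Never underflows once the anchor passes the target.
    assert_eq!(backfill_distance(900, 1000), 0);
}
```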
@ -7,7 +7,6 @@ edition = "2021"

[dev-dependencies]
eth1_test_rig = { path = "../../testing/eth1_test_rig" }
serde_yaml = "0.8.13"
web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] }
sloggers = { version = "2.1.1", features = ["json"] }
environment = { path = "../../lighthouse/environment" }

@ -20,9 +19,9 @@ serde = { version = "1.0.116", features = ["derive"] }
hex = "0.4.2"
types = { path = "../../consensus/types"}
merkle_proof = { path = "../../consensus/merkle_proof"}
eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.1"
tree_hash = "0.4.1"
ethereum_ssz = "0.5.0"
ethereum_ssz_derive = "0.5.0"
tree_hash = "0.5.0"
parking_lot = "0.12.0"
slog = "2.5.2"
superstruct = "0.5.0"

@ -2,7 +2,7 @@
use environment::{Environment, EnvironmentBuilder};
use eth1::{Config, Eth1Endpoint, Service};
use eth1::{DepositCache, DEFAULT_CHAIN_ID};
use eth1_test_rig::GanacheEth1Instance;
use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider};
use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log};
use merkle_proof::verify_merkle_proof;
use sensitive_url::SensitiveUrl;

@ -12,7 +12,6 @@ use std::ops::Range;
use std::time::Duration;
use tree_hash::TreeHash;
use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature};
use web3::{transports::Http, Web3};

const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32;

@ -53,7 +52,7 @@ fn random_deposit_data() -> DepositData {
/// Blocking operation to get the deposit logs from the `deposit_contract`.
async fn blocking_deposit_logs(
client: &HttpJsonRpc,
eth1: &GanacheEth1Instance,
eth1: &AnvilEth1Instance,
range: Range<u64>,
) -> Vec<Log> {
client

@ -65,7 +64,7 @@ async fn blocking_deposit_logs(
/// Blocking operation to get the deposit root from the `deposit_contract`.
async fn blocking_deposit_root(
client: &HttpJsonRpc,
eth1: &GanacheEth1Instance,
eth1: &AnvilEth1Instance,
block_number: u64,
) -> Option<Hash256> {
client

@ -77,7 +76,7 @@ async fn blocking_deposit_root(
/// Blocking operation to get the deposit count from the `deposit_contract`.
async fn blocking_deposit_count(
client: &HttpJsonRpc,
eth1: &GanacheEth1Instance,
eth1: &AnvilEth1Instance,
block_number: u64,
) -> Option<u64> {
client

@ -86,16 +85,16 @@ async fn blocking_deposit_count(
.expect("should get deposit count")
}

async fn get_block_number(web3: &Web3<Http>) -> u64 {
web3.eth()
.block_number()
async fn get_block_number(client: &Provider<Http>) -> u64 {
client
.get_block_number()
.await
.map(|v| v.as_u64())
.expect("should get block number")
}

async fn new_ganache_instance() -> Result<GanacheEth1Instance, String> {
GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await
async fn new_anvil_instance() -> Result<AnvilEth1Instance, String> {
AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await
}

mod eth1_cache {

@ -108,13 +107,13 @@ mod eth1_cache {
let log = null_logger();

for follow_distance in 0..3 {
let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let initial_block_number = get_block_number(&web3).await;
let initial_block_number = get_block_number(&anvil_client).await;

let config = Config {
endpoint: Eth1Endpoint::NoAuth(

@ -146,7 +145,7 @@ mod eth1_cache {
};

for _ in 0..blocks {
eth1.ganache.evm_mine().await.expect("should mine block");
eth1.anvil.evm_mine().await.expect("should mine block");
}

service

@ -189,11 +188,11 @@ mod eth1_cache {
async {
let log = null_logger();

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let cache_len = 4;

@ -203,7 +202,7 @@ mod eth1_cache {
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
lowest_cached_block_number: get_block_number(&anvil_client).await,
follow_distance: 0,
block_cache_truncation: Some(cache_len),
..Config::default()

@ -216,7 +215,7 @@ mod eth1_cache {
let blocks = cache_len * 2;

for _ in 0..blocks {
eth1.ganache.evm_mine().await.expect("should mine block")
eth1.anvil.evm_mine().await.expect("should mine block")
}

service

@ -244,11 +243,11 @@ mod eth1_cache {
async {
let log = null_logger();

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let cache_len = 4;

@ -258,7 +257,7 @@ mod eth1_cache {
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
lowest_cached_block_number: get_block_number(&anvil_client).await,
follow_distance: 0,
block_cache_truncation: Some(cache_len),
..Config::default()

@ -270,7 +269,7 @@ mod eth1_cache {

for _ in 0..4u8 {
for _ in 0..cache_len / 2 {
eth1.ganache.evm_mine().await.expect("should mine block")
eth1.anvil.evm_mine().await.expect("should mine block")
}
service
.update_deposit_cache(None)

@ -298,11 +297,11 @@ mod eth1_cache {

let n = 16;

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let service = Service::new(
Config {

@ -310,7 +309,7 @@ mod eth1_cache {
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
),
deposit_contract_address: deposit_contract.address(),
lowest_cached_block_number: get_block_number(&web3).await,
lowest_cached_block_number: get_block_number(&anvil_client).await,
follow_distance: 0,
..Config::default()
},

@ -320,7 +319,7 @@ mod eth1_cache {
.unwrap();

for _ in 0..n {
eth1.ganache.evm_mine().await.expect("should mine block")
eth1.anvil.evm_mine().await.expect("should mine block")
}

futures::try_join!(

@ -341,6 +340,7 @@ mod eth1_cache {
}

mod deposit_tree {

use super::*;

#[tokio::test]

@ -350,13 +350,13 @@ mod deposit_tree {

let n = 4;

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let start_block = get_block_number(&web3).await;
let start_block = get_block_number(&anvil_client).await;

let service = Service::new(
Config {

@ -431,13 +431,13 @@ mod deposit_tree {

let n = 8;

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let start_block = get_block_number(&web3).await;
let start_block = get_block_number(&anvil_client).await;

let service = Service::new(
Config {

@ -484,11 +484,12 @@ mod deposit_tree {

let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");

let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let mut deposit_roots = vec![];
let mut deposit_counts = vec![];

@ -502,7 +503,7 @@ mod deposit_tree {
.deposit(deposit.clone())
.await
.expect("should perform a deposit");
let block_number = get_block_number(&web3).await;
let block_number = get_block_number(&anvil_client).await;
deposit_roots.push(
blocking_deposit_root(&client, &eth1, block_number)
.await

@ -518,7 +519,7 @@ mod deposit_tree {
let mut tree = DepositCache::default();

// Pull all the deposit logs from the contract.
let block_number = get_block_number(&web3).await;
let block_number = get_block_number(&anvil_client).await;
let logs: Vec<_> = blocking_deposit_logs(&client, &eth1, 0..block_number)
.await
.iter()

@ -593,15 +594,15 @ mod http {
#[tokio::test]
async fn incrementing_deposits() {
async {
let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();
let client =
HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None).unwrap();

let block_number = get_block_number(&web3).await;
let block_number = get_block_number(&anvil_client).await;
let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
assert_eq!(logs.len(), 0);

@ -616,10 +617,10 @@ mod http {
);

for i in 1..=8 {
eth1.ganache
eth1.anvil
.increase_time(1)
.await
.expect("should be able to increase time on ganache");
.expect("should be able to increase time on anvil");

deposit_contract
.deposit(random_deposit_data())

@ -627,7 +628,7 @@ mod http {
.expect("should perform a deposit");

// Check the logs.
let block_number = get_block_number(&web3).await;
let block_number = get_block_number(&anvil_client).await;
let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
assert_eq!(logs.len(), i, "the number of logs should be as expected");

@ -690,13 +691,13 @@ mod fast {
async {
let log = null_logger();

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let now = get_block_number(&web3).await;
let now = get_block_number(&anvil_client).await;
let spec = MainnetEthSpec::default_spec();
let service = Service::new(
Config {

@ -724,7 +725,7 @@ mod fast {
.await
.expect("should perform a deposit");
// Mine an extra block between deposits to test for corner cases
eth1.ganache.evm_mine().await.expect("should mine block");
eth1.anvil.evm_mine().await.expect("should mine block");
}

service

@ -737,7 +738,7 @@ mod fast {
"should have imported n deposits"
);

for block_num in 0..=get_block_number(&web3).await {
for block_num in 0..=get_block_number(&anvil_client).await {
let expected_deposit_count =
blocking_deposit_count(&client, &eth1, block_num).await;
let expected_deposit_root = blocking_deposit_root(&client, &eth1, block_num).await;

@ -773,13 +774,13 @@ mod persist {
async {
let log = null_logger();

let eth1 = new_ganache_instance()
let eth1 = new_anvil_instance()
.await
.expect("should start eth1 environment");
let deposit_contract = &eth1.deposit_contract;
let web3 = eth1.web3();
let anvil_client = eth1.json_rpc_client();

let now = get_block_number(&web3).await;
let now = get_block_number(&anvil_client).await;
let config = Config {
endpoint: Eth1Endpoint::NoAuth(
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),

@ -13,7 +13,7 @@ slog = "2.5.2"
futures = "0.3.7"
sensitive_url = { path = "../../common/sensitive_url" }
reqwest = { version = "0.11.0", features = ["json","stream"] }
eth2_serde_utils = "0.1.1"
ethereum_serde_utils = "0.5.0"
serde_json = "1.0.58"
serde = { version = "1.0.116", features = ["derive"] }
warp = { version = "0.3.2", features = ["tls"] }

@ -22,16 +22,16 @@ environment = { path = "../../lighthouse/environment" }
bytes = "1.1.0"
task_executor = { path = "../../common/task_executor" }
hex = "0.4.2"
eth2_ssz = "0.4.1"
eth2_ssz_types = "0.2.2"
ethereum_ssz = "0.5.0"
ssz_types = "0.5.0"
eth2 = { path = "../../common/eth2" }
kzg = { path = "../../crypto/kzg" }
state_processing = { path = "../../consensus/state_processing" }
superstruct = "0.6.0"
lru = "0.7.1"
exit-future = "0.2.0"
tree_hash = "0.4.1"
tree_hash_derive = { path = "../../consensus/tree_hash_derive"}
tree_hash = "0.5.0"
tree_hash_derive = "0.5.0"
parking_lot = "0.12.0"
slot_clock = { path = "../../common/slot_clock" }
tempfile = "3.1.0"

@ -142,11 +142,11 @@ pub enum BlockByNumberQuery<'a> {
pub struct ExecutionBlock {
#[serde(rename = "hash")]
pub block_hash: ExecutionBlockHash,
#[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")]
#[serde(rename = "number", with = "serde_utils::u64_hex_be")]
pub block_number: u64,
pub parent_hash: ExecutionBlockHash,
pub total_difficulty: Uint256,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub timestamp: u64,
}
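For readers following the `eth2_serde_utils` → `serde_utils` rename that runs through the rest of this file: the helper encodes `u64` fields as Engine API QUANTITY strings. A hedged sketch of the expected encoding (our own re-implementation for illustration, not the crate's source):

```rust
// Our own minimal re-implementation (an assumption, not the crate's code) of
// the encoding `serde_utils::u64_hex_be` is expected to apply: an Engine API
// QUANTITY, i.e. 0x-prefixed big-endian hex with no leading zeros.
fn u64_hex_be(value: u64) -> String {
    format!("{value:#x}")
}

fn main() {
    assert_eq!(u64_hex_be(0), "0x0");
    // e.g. `block_number: 66` serializes as `"number": "0x42"` above.
    assert_eq!(u64_hex_be(66), "0x42");
}
```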
@ -172,13 +172,13 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
#[serde(alias = "mixHash")]
pub prev_randao: Hash256,
#[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")]
#[serde(rename = "number", with = "serde_utils::u64_hex_be")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub gas_limit: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub gas_used: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub timestamp: u64,
#[serde(with = "ssz_types::serde_utils::hex_var_list")]
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,

@ -189,7 +189,7 @@ pub struct ExecutionBlockWithTransactions<T: EthSpec> {
#[superstruct(only(Capella, Deneb))]
pub withdrawals: Vec<JsonWithdrawal>,
#[superstruct(only(Deneb))]
#[serde(with = "eth2_serde_utils::u256_hex_be")]
#[serde(with = "serde_utils::u256_hex_be")]
pub excess_data_gas: Uint256,
}

@ -993,7 +993,7 @@ impl HttpJsonRpc {
) -> Result<Vec<Option<ExecutionPayloadBodyV1<E>>>, Error> {
#[derive(Serialize)]
#[serde(transparent)]
struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] u64);
struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64);

let params = json!([Quantity(start), Quantity(count)]);
let response: Vec<Option<JsonExecutionPayloadBodyV1<E>>> = self

@ -37,7 +37,7 @@ pub struct JsonResponseBody {

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId);
pub struct TransparentJsonPayloadId(#[serde(with = "serde_utils::bytes_8_hex")] pub PayloadId);

impl From<PayloadId> for TransparentJsonPayloadId {
fn from(id: PayloadId) -> Self {

@ -58,7 +58,7 @@ pub type JsonPayloadIdRequest = TransparentJsonPayloadId;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonPayloadIdResponse {
#[serde(with = "eth2_serde_utils::bytes_8_hex")]
#[serde(with = "serde_utils::bytes_8_hex")]
pub payload_id: PayloadId,
}

@ -81,17 +81,17 @@ pub struct JsonExecutionPayload<T: EthSpec> {
#[serde(with = "serde_logs_bloom")]
pub logs_bloom: FixedVector<u8, T::BytesPerLogsBloom>,
pub prev_randao: Hash256,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub block_number: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub gas_limit: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub gas_used: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub timestamp: u64,
#[serde(with = "ssz_types::serde_utils::hex_var_list")]
pub extra_data: VariableList<u8, T::MaxExtraDataBytes>,
#[serde(with = "eth2_serde_utils::u256_hex_be")]
#[serde(with = "serde_utils::u256_hex_be")]
pub base_fee_per_gas: Uint256,
pub block_hash: ExecutionBlockHash,
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")]

@ -99,7 +99,7 @@ pub struct JsonExecutionPayload<T: EthSpec> {
#[superstruct(only(V2, V3))]
pub withdrawals: VariableList<JsonWithdrawal, T::MaxWithdrawalsPerPayload>,
#[superstruct(only(V3))]
#[serde(with = "eth2_serde_utils::u256_hex_be")]
#[serde(with = "serde_utils::u256_hex_be")]
pub excess_data_gas: Uint256,
}

@ -289,7 +289,7 @@ pub struct JsonGetPayloadResponse<T: EthSpec> {
pub execution_payload: JsonExecutionPayloadV2<T>,
#[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))]
pub execution_payload: JsonExecutionPayloadV3<T>,
#[serde(with = "eth2_serde_utils::u256_hex_be")]
#[serde(with = "serde_utils::u256_hex_be")]
pub block_value: Uint256,
#[superstruct(only(V3))]
pub blobs_bundle: JsonBlobsBundleV1<T>,

@ -324,12 +324,12 @@ impl<T: EthSpec> From<JsonGetPayloadResponse<T>> for GetPayloadResponse<T> {
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsonWithdrawal {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub index: u64,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub validator_index: u64,
pub address: Address,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub amount: u64,
}

@ -367,7 +367,7 @@ impl From<JsonWithdrawal> for Withdrawal {
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub struct JsonPayloadAttributes {
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub timestamp: u64,
pub prev_randao: Hash256,
pub suggested_fee_recipient: Address,

@ -620,18 +620,18 @@ impl<E: EthSpec> From<JsonExecutionPayloadBodyV1<E>> for ExecutionPayloadBodyV1<
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransitionConfigurationV1 {
#[serde(with = "eth2_serde_utils::u256_hex_be")]
#[serde(with = "serde_utils::u256_hex_be")]
pub terminal_total_difficulty: Uint256,
pub terminal_block_hash: ExecutionBlockHash,
#[serde(with = "eth2_serde_utils::u64_hex_be")]
#[serde(with = "serde_utils::u64_hex_be")]
pub terminal_block_number: u64,
}

/// Serializes the `logs_bloom` field of an `ExecutionPayload`.
pub mod serde_logs_bloom {
use super::*;
use eth2_serde_utils::hex::PrefixedHexVisitor;
use serde::{Deserializer, Serializer};
use serde_utils::hex::PrefixedHexVisitor;

pub fn serialize<S, U>(bytes: &FixedVector<u8, U>, serializer: S) -> Result<S::Ok, S::Error>
where

@ -238,6 +238,11 @@ impl Engine {
**self.state.read().await == EngineStateInternal::Synced
}

/// Returns `true` if the engine has a status other than synced or syncing.
pub async fn is_offline(&self) -> bool {
EngineState::from(**self.state.read().await) == EngineState::Offline
}

/// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This
/// might be used to recover the node if offline.
pub async fn upcheck(&self) {

@ -307,6 +307,11 @@ struct Inner<E: EthSpec> {
builder_profit_threshold: Uint256,
log: Logger,
always_prefer_builder_payload: bool,
/// Track whether the last `newPayload` call errored.
///
/// This is used *only* in the informational sync status endpoint, so that a VC using this
/// node can prefer another node with a healthier EL.
last_new_payload_errored: RwLock<bool>,
}

#[derive(Debug, Default, Clone, Serialize, Deserialize)]

@ -413,7 +418,7 @@ impl<T: EthSpec> ExecutionLayer<T> {

info!(
log,
"Connected to external block builder";
"Using external block builder";
"builder_url" => ?url,
"builder_profit_threshold" => builder_profit_threshold,
"local_user_agent" => builder_client.get_user_agent(),

@ -435,6 +440,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
builder_profit_threshold: Uint256::from(builder_profit_threshold),
log,
always_prefer_builder_payload,
last_new_payload_errored: RwLock::new(false),
};

Ok(Self {

@ -627,6 +633,15 @@ impl<T: EthSpec> ExecutionLayer<T> {
synced
}

/// Return `true` if the execution layer is offline or returning errors on `newPayload`.
///
/// This function should never be used to prevent any operation in the beacon node, but can
/// be used to give an indication on the HTTP API that the node's execution layer is struggling,
/// which can in turn be used by the VC.
pub async fn is_offline_or_erroring(&self) -> bool {
self.engine().is_offline().await || *self.inner.last_new_payload_errored.read().await
}

/// Updates the proposer preparation data provided by validators
pub async fn update_proposer_preparation(
&self,

@ -1192,18 +1207,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
}

/// Maps to the `engine_newPayload` JSON-RPC call.
///
/// ## Fallback Behaviour
///
/// The request will be broadcast to all nodes, simultaneously. It will await a response (or
/// failure) from all nodes and then return based on the first of these conditions which
/// returns true:
///
/// - Error::ConsensusFailure if some nodes return valid and some return invalid
/// - Valid, if any nodes return valid.
/// - Invalid, if any nodes return invalid.
/// - Syncing, if any nodes return syncing.
/// - An error, if all nodes return an error.
pub async fn notify_new_payload(
&self,
execution_payload: &ExecutionPayload<T>,

@ -1232,12 +1235,18 @@ impl<T: EthSpec> ExecutionLayer<T> {
&["new_payload", status.status.into()],
);
}
*self.inner.last_new_payload_errored.write().await = result.is_err();

process_payload_status(execution_payload.block_hash(), result, self.log())
.map_err(Box::new)
.map_err(Error::EngineError)
}

/// Update engine sync status.
pub async fn upcheck(&self) {
self.engine().upcheck().await;
}

/// Register that the given `validator_index` is going to produce a block at `slot`.
///
/// The block will be built atop `head_block_root` and the EL will need to prepare an

@ -1297,18 +1306,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
}

/// Maps to the `engine_consensusValidated` JSON-RPC call.
///
/// ## Fallback Behaviour
///
/// The request will be broadcast to all nodes, simultaneously. It will await a response (or
/// failure) from all nodes and then return based on the first of these conditions which
/// returns true:
///
/// - Error::ConsensusFailure if some nodes return valid and some return invalid
/// - Valid, if any nodes return valid.
/// - Invalid, if any nodes return invalid.
/// - Syncing, if any nodes return syncing.
/// - An error, if all nodes return an error.
pub async fn notify_forkchoice_updated(
&self,
head_block_hash: ExecutionBlockHash,

@ -2273,7 +2270,7 @@ fn ethers_tx_to_bytes<T: EthSpec>(
.ok_or(BlobTxConversionError::VersionedHashesMissing)?
.iter()
.map(|versioned_hash| {
let hash_bytes = eth2_serde_utils::hex::decode(
let hash_bytes = serde_utils::hex::decode(
versioned_hash
.as_str()
.ok_or(BlobTxConversionError::VersionedHashesMissing)?,

@ -30,7 +30,12 @@ pub async fn handle_rpc<T: EthSpec>(
.map_err(|s| (s, GENERIC_ERROR_CODE))?;

match method {
ETH_SYNCING => Ok(JsonValue::Bool(false)),
ETH_SYNCING => ctx
.syncing_response
.lock()
.clone()
.map(JsonValue::Bool)
.map_err(|message| (message, GENERIC_ERROR_CODE)),
ETH_GET_BLOCK_BY_NUMBER => {
let tag = params
.get(0)

@ -180,7 +185,9 @@ pub async fn handle_rpc<T: EthSpec>(

// Canned responses set by block hash take priority.
if let Some(status) = ctx.get_new_payload_status(request.block_hash()) {
return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap());
return status
.map(|status| serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap())
.map_err(|message| (message, GENERIC_ERROR_CODE));
}

let (static_response, should_import) =

@ -398,11 +405,15 @@ pub async fn handle_rpc<T: EthSpec>(

// Canned responses set by block hash take priority.
if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) {
let response = JsonForkchoiceUpdatedV1Response {
payload_status: JsonPayloadStatusV1::from(status),
payload_id: None,
};
return Ok(serde_json::to_value(response).unwrap());
return status
.map(|status| {
let response = JsonForkchoiceUpdatedV1Response {
payload_status: JsonPayloadStatusV1::from(status),
payload_id: None,
};
serde_json::to_value(response).unwrap()
})
.map_err(|message| (message, GENERIC_ERROR_CODE));
}

let mut response = ctx

@ -440,7 +451,7 @@ pub async fn handle_rpc<T: EthSpec>(
ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => {
#[derive(Deserialize)]
#[serde(transparent)]
struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] pub u64);
struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64);

let start = get_param::<Quantity>(params, 0)
.map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?

@ -140,6 +140,7 @@ impl<T: EthSpec> MockServer<T> {
hook: <_>::default(),
new_payload_statuses: <_>::default(),
fcu_payload_statuses: <_>::default(),
syncing_response: Arc::new(Mutex::new(Ok(false))),
engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)),
_phantom: PhantomData,
});

@ -433,14 +434,25 @@ impl<T: EthSpec> MockServer<T> {
self.ctx
.new_payload_statuses
.lock()
.insert(block_hash, status);
.insert(block_hash, Ok(status));
}

pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) {
self.ctx
.fcu_payload_statuses
.lock()
.insert(block_hash, status);
.insert(block_hash, Ok(status));
}

pub fn set_new_payload_error(&self, block_hash: ExecutionBlockHash, error: String) {
self.ctx
.new_payload_statuses
.lock()
.insert(block_hash, Err(error));
}

pub fn set_syncing_response(&self, res: Result<bool, String>) {
*self.ctx.syncing_response.lock() = res;
}
}

@ -497,8 +509,11 @@ pub struct Context<T: EthSpec> {
//
// This is a more flexible and less stateful alternative to `static_new_payload_response`
// and `preloaded_responses`.
pub new_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,
pub fcu_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,
pub new_payload_statuses:
    Arc<Mutex<HashMap<ExecutionBlockHash, Result<PayloadStatusV1, String>>>>,
pub fcu_payload_statuses:
    Arc<Mutex<HashMap<ExecutionBlockHash, Result<PayloadStatusV1, String>>>>,
pub syncing_response: Arc<Mutex<Result<bool, String>>>,

pub engine_capabilities: Arc<RwLock<EngineCapabilities>>,
pub _phantom: PhantomData<T>,

@ -508,14 +523,14 @@ impl<T: EthSpec> Context<T> {
pub fn get_new_payload_status(
&self,
block_hash: &ExecutionBlockHash,
) -> Option<PayloadStatusV1> {
) -> Option<Result<PayloadStatusV1, String>> {
self.new_payload_statuses.lock().get(block_hash).cloned()
}

pub fn get_fcu_payload_status(
&self,
block_hash: &ExecutionBlockHash,
) -> Option<PayloadStatusV1> {
) -> Option<Result<PayloadStatusV1, String>> {
self.fcu_payload_statuses.lock().get(block_hash).cloned()
}
}
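A hedged, self-contained sketch of the canned-response pattern the mock server adopts above, with plain stand-in types: storing `Result<_, String>` per block hash lets a test force either a concrete payload status or a JSON-RPC error.

```rust
use std::collections::HashMap;

// Stand-in for the real `PayloadStatusV1`; the map's value type mirrors the
// `Result<PayloadStatusV1, String>` introduced in the mock `Context` above.
#[derive(Clone, Debug, PartialEq)]
struct PayloadStatus(&'static str);

fn main() {
    let mut canned: HashMap<u64, Result<PayloadStatus, String>> = HashMap::new();
    // Analogue of `set_new_payload_status`: a concrete canned status.
    canned.insert(1, Ok(PayloadStatus("VALID")));
    // Analogue of `set_new_payload_error`: a canned JSON-RPC error.
    canned.insert(2, Err("error".to_string()));

    assert_eq!(canned.get(&1).cloned(), Some(Ok(PayloadStatus("VALID"))));
    assert!(matches!(canned.get(&2), Some(Err(_))));
}
```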
|
@ -16,9 +16,9 @@ eth1 = { path = "../eth1"}
|
||||
rayon = "1.4.1"
|
||||
state_processing = { path = "../../consensus/state_processing" }
|
||||
merkle_proof = { path = "../../consensus/merkle_proof" }
|
||||
eth2_ssz = "0.4.1"
|
||||
eth2_hashing = "0.3.0"
|
||||
tree_hash = "0.4.1"
|
||||
ethereum_ssz = "0.5.0"
|
||||
ethereum_hashing = "1.0.0-beta.2"
|
||||
tree_hash = "0.5.0"
|
||||
tokio = { version = "1.14.0", features = ["full"] }
|
||||
slog = "2.5.2"
|
||||
int_to_bytes = { path = "../../consensus/int_to_bytes" }
|
||||
|
@ -1,5 +1,5 @@
|
||||
use crate::common::genesis_deposits;
|
||||
use eth2_hashing::hash;
|
||||
use ethereum_hashing::hash;
|
||||
use rayon::prelude::*;
|
||||
use ssz::Encode;
|
||||
use state_processing::initialize_beacon_state_from_eth1;
|
||||
|
@ -1,11 +1,11 @@
|
||||
//! NOTE: These tests will not pass unless ganache is running on `ENDPOINT` (see below).
|
||||
//! NOTE: These tests will not pass unless an anvil is running on `ENDPOINT` (see below).
|
||||
//!
|
||||
//! You can start a suitable instance using the `ganache_test_node.sh` script in the `scripts`
|
||||
//! You can start a suitable instance using the `anvil_test_node.sh` script in the `scripts`
|
||||
//! dir in the root of the `lighthouse` repo.
|
||||
#![cfg(test)]
|
||||
use environment::{Environment, EnvironmentBuilder};
|
||||
use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID};
|
||||
use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance};
|
||||
use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware};
|
||||
use genesis::{Eth1Config, Eth1GenesisService};
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use state_processing::is_valid_genesis_state;
|
||||
@ -29,15 +29,14 @@ fn basic() {
|
||||
let mut spec = env.eth2_config().spec.clone();
|
||||
|
||||
env.runtime().block_on(async {
|
||||
let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into())
|
||||
let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into())
|
||||
.await
|
||||
.expect("should start eth1 environment");
|
||||
let deposit_contract = ð1.deposit_contract;
|
||||
let web3 = eth1.web3();
|
||||
let client = eth1.json_rpc_client();
|
||||
|
||||
let now = web3
|
||||
.eth()
|
||||
.block_number()
|
||||
let now = client
|
||||
.get_block_number()
|
||||
.await
|
||||
.map(|v| v.as_u64())
|
||||
.expect("should get block number");
|
||||
@ -89,7 +88,7 @@ fn basic() {
|
||||
.map(|(_, state)| state)
|
||||
.expect("should finish waiting for genesis");
|
||||
|
||||
// Note: using ganache these deposits are 1-per-block, therefore we know there should only be
|
||||
// Note: using anvil these deposits are 1-per-block, therefore we know there should only be
|
||||
// the minimum number of validators.
|
||||
assert_eq!(
|
||||
state.validators().len(),
|
||||
|
@ -24,7 +24,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
|
||||
lazy_static = "1.4.0"
|
||||
warp_utils = { path = "../../common/warp_utils" }
|
||||
slot_clock = { path = "../../common/slot_clock" }
|
||||
eth2_ssz = "0.4.1"
|
||||
ethereum_ssz = "0.5.0"
|
||||
bs58 = "0.4.0"
|
||||
futures = "0.3.8"
|
||||
execution_layer = {path = "../execution_layer"}
|
||||
@ -32,15 +32,15 @@ parking_lot = "0.12.0"
|
||||
safe_arith = {path = "../../consensus/safe_arith"}
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
lru = "0.7.7"
|
||||
tree_hash = "0.4.1"
|
||||
tree_hash = "0.5.0"
|
||||
sysinfo = "0.26.5"
|
||||
system_health = { path = "../../common/system_health" }
|
||||
directory = { path = "../../common/directory" }
|
||||
eth2_serde_utils = "0.1.1"
|
||||
logging = { path = "../../common/logging" }
|
||||
ethereum_serde_utils = "0.5.0"
|
||||
operation_pool = { path = "../operation_pool" }
|
||||
sensitive_url = { path = "../../common/sensitive_url" }
|
||||
unused_port = {path = "../../common/unused_port"}
|
||||
logging = { path = "../../common/logging" }
|
||||
store = { path = "../store" }
|
||||
|
||||
[dev-dependencies]
|
||||
@ -51,4 +51,4 @@ genesis = { path = "../genesis" }
|
||||
|
||||
[[test]]
|
||||
name = "bn_http_api_tests"
|
||||
path = "tests/main.rs"
|
||||
path = "tests/main.rs"
|
@ -37,6 +37,7 @@ use eth2::types::{
|
||||
};
|
||||
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
|
||||
use lighthouse_version::version_with_platform;
|
||||
use logging::SSELoggingComponents;
|
||||
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
|
||||
use operation_pool::ReceivedPreCapella;
|
||||
use parking_lot::RwLock;
|
||||
@ -109,6 +110,7 @@ pub struct Context<T: BeaconChainTypes> {
|
||||
pub network_senders: Option<NetworkSenders<T::EthSpec>>,
|
||||
pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
|
||||
pub eth1_service: Option<eth1::Service>,
|
||||
pub sse_logging_components: Option<SSELoggingComponents>,
|
||||
pub log: Logger,
|
||||
}
|
||||
|
||||
@ -449,6 +451,9 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let inner_ctx = ctx.clone();
|
||||
let log_filter = warp::any().map(move || inner_ctx.log.clone());
|
||||
|
||||
let inner_components = ctx.sse_logging_components.clone();
|
||||
let sse_component_filter = warp::any().map(move || inner_components.clone());
|
||||
|
||||
// Create a `warp` filter that provides access to local system information.
|
||||
let system_info = Arc::new(RwLock::new(sysinfo::System::new()));
|
||||
{
|
||||
@ -2238,12 +2243,8 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.parent
|
||||
.and_then(|index| proto_array.nodes.get(index))
|
||||
.map(|parent| parent.root),
|
||||
justified_epoch: node
|
||||
.justified_checkpoint
|
||||
.map(|checkpoint| checkpoint.epoch),
|
||||
finalized_epoch: node
|
||||
.finalized_checkpoint
|
||||
.map(|checkpoint| checkpoint.epoch),
|
||||
justified_epoch: node.justified_checkpoint.epoch,
|
||||
finalized_epoch: node.finalized_checkpoint.epoch,
|
||||
weight: node.weight,
|
||||
validity: execution_status,
|
||||
execution_block_hash: node
|
||||
@ -2325,28 +2326,40 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(chain_filter.clone())
|
||||
.and_then(
|
||||
|network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
let head_slot = chain.canonical_head.cached_head().head_slot();
|
||||
let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error("Unable to read slot clock".into())
|
||||
})?;
|
||||
|
||||
// Taking advantage of saturating subtraction on slot.
|
||||
let sync_distance = current_slot - head_slot;
|
||||
|
||||
let is_optimistic = chain
|
||||
.is_optimistic_or_invalid_head()
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?;
|
||||
|
||||
let syncing_data = api_types::SyncingData {
|
||||
is_syncing: network_globals.sync_state.read().is_syncing(),
|
||||
is_optimistic: Some(is_optimistic),
|
||||
head_slot,
|
||||
sync_distance,
|
||||
async move {
|
||||
let el_offline = if let Some(el) = &chain.execution_layer {
|
||||
el.is_offline_or_erroring().await
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
Ok(api_types::GenericResponse::from(syncing_data))
|
||||
})
|
||||
blocking_json_task(move || {
|
||||
let head_slot = chain.canonical_head.cached_head().head_slot();
|
||||
let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| {
|
||||
warp_utils::reject::custom_server_error(
|
||||
"Unable to read slot clock".into(),
|
||||
)
|
||||
})?;
|
||||
|
||||
// Taking advantage of saturating subtraction on slot.
|
||||
let sync_distance = current_slot - head_slot;
|
||||
|
||||
let is_optimistic = chain
|
||||
.is_optimistic_or_invalid_head()
|
||||
.map_err(warp_utils::reject::beacon_chain_error)?;
|
||||
|
||||
let syncing_data = api_types::SyncingData {
|
||||
is_syncing: network_globals.sync_state.read().is_syncing(),
|
||||
is_optimistic: Some(is_optimistic),
|
||||
el_offline: Some(el_offline),
|
||||
head_slot,
|
||||
sync_distance,
|
||||
};
|
||||
|
||||
Ok(api_types::GenericResponse::from(syncing_data))
|
||||
})
|
||||
.await
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
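A hedged sketch of the `GET /eth/v1/node/syncing` response shape after this change, built with `serde_json` (an assumed dev-dependency); the values are illustrative and only `el_offline` is new:

```rust
// Illustrative only: the response body of `GET /eth/v1/node/syncing` after
// this change. `el_offline` is the new field; the other values are made up.
use serde_json::json;

fn main() {
    let response = json!({
        "data": {
            "is_syncing": false,
            "is_optimistic": false,
            "el_offline": true,
            "head_slot": "4096",
            "sync_distance": "0"
        }
    });
    println!("{response}");
}
```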
@ -3760,6 +3773,44 @@ pub fn serve<T: BeaconChainTypes>(
},
);

// Subscribe to logs via Server Side Events
// /lighthouse/logs
let lighthouse_log_events = warp::path("lighthouse")
.and(warp::path("logs"))
.and(warp::path::end())
.and(sse_component_filter)
.and_then(|sse_component: Option<SSELoggingComponents>| {
blocking_response_task(move || {
if let Some(logging_components) = sse_component {
// Build a JSON stream
let s =
BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| {
match msg {
Ok(data) => {
// Serialize to json
match data.to_json_string() {
// Send the json as a Server Side Event
Ok(json) => Ok(Event::default().data(json)),
Err(e) => Err(warp_utils::reject::server_sent_event_error(
format!("Unable to serialize to JSON {}", e),
)),
}
}
Err(e) => Err(warp_utils::reject::server_sent_event_error(
format!("Unable to receive event {}", e),
)),
}
});

Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
} else {
Err(warp_utils::reject::custom_server_error(
"SSE Logging is not enabled".to_string(),
))
}
})
});

// Define the ultimate set of routes that will be provided to the server.
// Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`).
let routes = warp::get()

@ -3828,6 +3879,7 @@ pub fn serve<T: BeaconChainTypes>(
.uor(get_lighthouse_block_packing_efficiency)
.uor(get_lighthouse_merge_readiness)
.uor(get_events)
.uor(lighthouse_log_events.boxed())
.recover(warp_utils::reject::handle_rejection),
)
.boxed()
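A hedged sketch of consuming the new `/lighthouse/logs` stream; the endpoint path comes from the diff, while the client crate (`reqwest` with its `stream` feature), `futures`, `tokio`, and the address are our assumptions:

```rust
// Hedged sketch: tail the new `/lighthouse/logs` SSE stream. Assumes a node
// on 127.0.0.1:5052 and `reqwest` built with its `stream` feature plus
// `futures` and `tokio`; none of these choices come from the PR itself.
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let response = reqwest::get("http://127.0.0.1:5052/lighthouse/logs").await?;
    let mut body = response.bytes_stream();
    while let Some(chunk) = body.next().await {
        // Each server-sent event arrives as `data: {...json log record...}\n\n`.
        print!("{}", String::from_utf8_lossy(&chunk?));
    }
    Ok(())
}
```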
@ -199,10 +199,14 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>(
Err(SyncVerificationError::PriorSyncCommitteeMessageKnown {
validator_index,
slot,
prev_root,
new_root,
}) => {
debug!(
log,
"Ignoring already-known sync message";
"new_root" => ?new_root,
"prev_root" => ?prev_root,
"slot" => slot,
"validator_index" => validator_index,
);

@ -195,6 +195,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
network_senders: Some(network_senders),
network_globals: Some(network_globals),
eth1_service: Some(eth1_service),
sse_logging_components: None,
log,
});

@ -75,15 +75,15 @@ pub fn get_validator_count<T: BeaconChainTypes>(

#[derive(PartialEq, Serialize, Deserialize)]
pub struct ValidatorInfoRequestData {
#[serde(with = "eth2_serde_utils::quoted_u64_vec")]
#[serde(with = "serde_utils::quoted_u64_vec")]
indices: Vec<u64>,
}

#[derive(PartialEq, Serialize, Deserialize)]
pub struct ValidatorInfoValues {
#[serde(with = "eth2_serde_utils::quoted_u64")]
#[serde(with = "serde_utils::quoted_u64")]
epoch: u64,
#[serde(with = "eth2_serde_utils::quoted_u64")]
#[serde(with = "serde_utils::quoted_u64")]
total_balance: u64,
}

@ -165,6 +165,7 @@ pub struct ValidatorMetrics {
attestation_target_hits: u64,
attestation_target_misses: u64,
attestation_target_hit_percentage: f64,
latest_attestation_inclusion_distance: u64,
}

#[derive(PartialEq, Serialize, Deserialize)]

@ -210,6 +211,8 @@ pub fn post_validator_monitor_metrics<T: BeaconChainTypes>(
let attestation_head_misses = val_metrics.attestation_head_misses;
let attestation_target_hits = val_metrics.attestation_target_hits;
let attestation_target_misses = val_metrics.attestation_target_misses;
let latest_attestation_inclusion_distance =
    val_metrics.latest_attestation_inclusion_distance;
drop(val_metrics);

let attestations = attestation_hits + attestation_misses;

@ -242,6 +245,7 @@ pub fn post_validator_monitor_metrics<T: BeaconChainTypes>(
attestation_target_hits,
attestation_target_misses,
attestation_target_hit_percentage,
latest_attestation_inclusion_distance,
};

validators.insert(id.clone(), metrics);

@ -2,4 +2,5 @@

pub mod fork_tests;
pub mod interactive_tests;
pub mod status_tests;
pub mod tests;

151
beacon_node/http_api/tests/status_tests.rs
Normal file

@ -0,0 +1,151 @@
//! Tests related to the beacon node's sync status
use beacon_chain::{
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
BlockError,
};
use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
use http_api::test_utils::InteractiveTester;
use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};

type E = MinimalEthSpec;

/// Create a new test environment that is post-merge with `chain_depth` blocks.
async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> InteractiveTester<E> {
// Test using latest fork so that we simulate conditions as similar to mainnet as possible.
// TODO(jimmy): We should change this back to `latest()`. These tests currently fail on Deneb because:
// 1. KZG library doesn't support Minimal spec, changing to Mainnet spec fixes some tests; BUT
// 2. `harness.process_block_result` in the test below panics due to
//    `AvailabilityProcessingStatus::PendingBlobs`, and there seems to be some race
//    condition going on, because the test passes if I step through the code in debug.
let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec());
spec.terminal_total_difficulty = 1.into();

let tester = InteractiveTester::<E>::new(Some(spec), validator_count as usize).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
let execution_ctx = mock_el.server.ctx.clone();

// Move to terminal block.
mock_el.server.all_payloads_valid();
execution_ctx
.execution_block_generator
.write()
.move_to_terminal_block()
.unwrap();

// Create some chain depth.
harness.advance_slot();
harness
.extend_chain_with_sync(
chain_depth as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
SyncCommitteeStrategy::AllValidators,
)
.await;
tester
}

/// Check `syncing` endpoint when the EL is syncing.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn el_syncing_then_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();

// EL syncing
mock_el.server.set_syncing_response(Ok(true));
mock_el.el.upcheck().await;

let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, Some(false));
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);

// EL synced
mock_el.server.set_syncing_response(Ok(false));
mock_el.el.upcheck().await;

let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, Some(false));
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);
}

/// Check `syncing` endpoint when the EL is offline (errors on upcheck).
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn el_offline() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();

// EL offline
mock_el.server.set_syncing_response(Err("offline".into()));
mock_el.el.upcheck().await;

let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, Some(true));
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);
}

/// Check `syncing` endpoint when the EL errors on newPayload but is not fully offline.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn el_error_on_new_payload() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();

// Make a block.
let pre_state = harness.get_current_state();
let (block_contents, _) = harness
.make_block(pre_state, Slot::new(num_blocks + 1))
.await;
let block = block_contents.0;
let block_hash = block
.message()
.body()
.execution_payload()
.unwrap()
.block_hash();

// Make sure `newPayload` errors for the new block.
mock_el
.server
.set_new_payload_error(block_hash, "error".into());

// Attempt to process the block, which should error.
harness.advance_slot();
assert!(matches!(
harness.process_block_result(block.clone()).await,
Err(BlockError::ExecutionPayloadError(_))
));

// The EL should now be *offline* according to the API.
let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, Some(true));
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);

// Processing a block successfully should remove the status.
mock_el.server.set_new_payload_status(
block_hash,
PayloadStatusV1 {
status: PayloadStatusV1Status::Valid,
latest_valid_hash: Some(block_hash),
validation_error: None,
},
);
harness.process_block_result(block).await.unwrap();

let api_response = tester.client.get_node_syncing().await.unwrap().data;
assert_eq!(api_response.el_offline, Some(false));
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);
}

@ -1729,6 +1729,8 @@ impl ApiTester {
let expected = SyncingData {
is_syncing: false,
is_optimistic: Some(false),
// these tests run without the Bellatrix fork enabled
el_offline: Some(true),
head_slot,
sync_distance,
};

@ -1964,8 +1966,8 @@ impl ApiTester {
.parent
.and_then(|index| expected_proto_array.nodes.get(index))
.map(|parent| parent.root),
justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch),
finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch),
justified_epoch: node.justified_checkpoint.epoch,
finalized_epoch: node.finalized_checkpoint.epoch,
weight: node.weight,
validity: execution_status,
execution_block_hash: node

@ -8,13 +8,13 @@ edition = "2021"
discv5 = { version = "0.2.2", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" }
eth2_ssz_types = "0.2.2"
ssz_types = "0.5.0"
serde = { version = "1.0.116", features = ["derive"] }
serde_derive = "1.0.116"
eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.0"
tree_hash = "0.4.1"
tree_hash_derive = "0.4.0"
ethereum_ssz = "0.5.0"
ethereum_ssz_derive = "0.5.0"
tree_hash = "0.5.0"
tree_hash_derive = "0.5.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" }
tokio = { version = "1.14.0", features = ["time", "macros"] }

@ -134,6 +134,9 @@ pub struct Config {
/// List of extra topics to initially subscribe to as strings.
pub topics: Vec<GossipKind>,

/// Whether we are running a block proposer only node.
pub proposer_only: bool,

/// Whether metrics are enabled.
pub metrics_enabled: bool,

@ -142,6 +145,9 @@ pub struct Config {

/// Configuration for the outbound rate limiter (requests made by this node).
pub outbound_rate_limiter_config: Option<OutboundRateLimiterConfig>,

/// Configures if/where invalid blocks should be stored.
pub invalid_block_storage: Option<PathBuf>,
}

impl Config {

@ -322,9 +328,11 @@ impl Default for Config {
import_all_attestations: false,
shutdown_after_sync: false,
topics: Vec::new(),
proposer_only: false,
metrics_enabled: false,
enable_light_client_server: false,
outbound_rate_limiter_config: None,
invalid_block_storage: None,
}
}
}

@ -952,6 +952,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
/// MIN_SYNC_COMMITTEE_PEERS
/// number should be set low as an absolute lower bound to maintain peers on the sync
/// committees.
/// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the
///   excess peer limit, all of the following logic is subverted as we will not prune any peers.
///   Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage
///   its peers across the subnets.
///
/// Prune peers in the following order:
/// 1. Remove worst scoring peers

@ -982,7 +986,9 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
.read()
.worst_connected_peers()
.iter()
.filter(|(_, info)| !info.has_future_duty() && $filter(*info))
.filter(|(_, info)| {
    !info.has_future_duty() && !info.is_trusted() && $filter(*info)
})
{
if peers_to_prune.len()
>= connected_peer_count.saturating_sub(self.target_peers)
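A hedged, self-contained sketch of the pruning predicate described above, with a stand-in `PeerInfo`: trusted peers and peers holding future duties are never pruning candidates.

```rust
/// Stand-in for the real `PeerInfo` used by the peer manager above.
struct PeerInfo {
    trusted: bool,
    future_duty: bool,
}

impl PeerInfo {
    fn is_trusted(&self) -> bool {
        self.trusted
    }
    fn has_future_duty(&self) -> bool {
        self.future_duty
    }
}

/// Mirrors the filter added in the heartbeat: a peer is only a pruning
/// candidate if it is neither trusted nor holding a future duty.
fn prunable(info: &PeerInfo) -> bool {
    !info.has_future_duty() && !info.is_trusted()
}

fn main() {
    assert!(prunable(&PeerInfo { trusted: false, future_duty: false }));
    assert!(!prunable(&PeerInfo { trusted: true, future_duty: false }));
    assert!(!prunable(&PeerInfo { trusted: false, future_duty: true }));
}
```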
@ -1032,8 +1038,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
> = HashMap::new();
|
||||
|
||||
for (peer_id, info) in self.network_globals.peers.read().connected_peers() {
|
||||
// Ignore peers we are already pruning
|
||||
if peers_to_prune.contains(peer_id) {
|
||||
// Ignore peers we trust or that we are already pruning
|
||||
if info.is_trusted() || peers_to_prune.contains(peer_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1330,25 +1336,47 @@ mod tests {
..Default::default()
};
let log = build_log(slog::Level::Debug, false);
let globals = NetworkGlobals::new_test_globals(&log);
let globals = NetworkGlobals::new_test_globals(vec![], &log);
PeerManager::new(config, Arc::new(globals), &log).unwrap()
}

async fn build_peer_manager_with_trusted_peers(
trusted_peers: Vec<PeerId>,
target_peer_count: usize,
) -> PeerManager<E> {
let config = config::Config {
target_peer_count,
discovery_enabled: false,
..Default::default()
};
let log = build_log(slog::Level::Debug, false);
let globals = NetworkGlobals::new_test_globals(trusted_peers, &log);
PeerManager::new(config, Arc::new(globals), &log).unwrap()
}

#[tokio::test]
async fn test_peer_manager_disconnects_correctly_during_heartbeat() {
let mut peer_manager = build_peer_manager(3).await;

// Create 5 peers to connect to.
// Create 6 peers to connect to with a target of 3.
// 2 will be outbound-only, and have the lowest score.
// 1 will be a trusted peer.
// The other 3 will be ingoing peers.

// We expect this test to disconnect from 3 peers. 1 from the outbound peer (the other must
// remain due to the outbound peer limit) and 2 from the ingoing peers (the trusted peer
// should remain connected).
let peer0 = PeerId::random();
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let outbound_only_peer1 = PeerId::random();
let outbound_only_peer2 = PeerId::random();
let trusted_peer = PeerId::random();

let mut peer_manager = build_peer_manager_with_trusted_peers(vec![trusted_peer], 3).await;

peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_outgoing(
&outbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
@ -1378,7 +1406,7 @@ mod tests {
.add_to_score(-2.0);

// Check initial connected peers.
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 5);
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 6);

peer_manager.heartbeat();

@ -1397,8 +1425,22 @@ mod tests {
.read()
.is_connected(&outbound_only_peer2));

// The trusted peer remains connected
assert!(peer_manager
.network_globals
.peers
.read()
.is_connected(&trusted_peer));

peer_manager.heartbeat();

// The trusted peer remains connected, even after subsequent heartbeats.
assert!(peer_manager
.network_globals
.peers
.read()
.is_connected(&trusted_peer));

// Check that if we are at target number of peers, we do not disconnect any.
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3);
}
@ -2143,7 +2185,7 @@ mod tests {
#[cfg(test)]
mod property_based_tests {
use crate::peer_manager::config::DEFAULT_TARGET_PEERS;
use crate::peer_manager::tests::build_peer_manager;
use crate::peer_manager::tests::build_peer_manager_with_trusted_peers;
use crate::rpc::MetaData;
use libp2p::PeerId;
use quickcheck::{Arbitrary, Gen, TestResult};
@ -2154,10 +2196,12 @@ mod tests {

#[derive(Clone, Debug)]
struct PeerCondition {
peer_id: PeerId,
outgoing: bool,
attestation_net_bitfield: Vec<bool>,
sync_committee_net_bitfield: Vec<bool>,
score: f64,
trusted: bool,
gossipsub_score: f64,
}

@ -2182,10 +2226,12 @@ mod tests {
};

PeerCondition {
peer_id: PeerId::random(),
outgoing: bool::arbitrary(g),
attestation_net_bitfield,
sync_committee_net_bitfield,
score: f64::arbitrary(g),
trusted: bool::arbitrary(g),
gossipsub_score: f64::arbitrary(g),
}
}
@ -2197,26 +2243,36 @@ mod tests {
if peer_conditions.len() < target_peer_count {
return TestResult::discard();
}
let trusted_peers: Vec<_> = peer_conditions
.iter()
.filter_map(|p| if p.trusted { Some(p.peer_id) } else { None })
.collect();
// If we have a high percentage of trusted peers, it is very difficult to reason about
// the expected results of the pruning.
if trusted_peers.len() > peer_conditions.len() / 3_usize {
return TestResult::discard();
}
let rt = Runtime::new().unwrap();

rt.block_on(async move {
let mut peer_manager = build_peer_manager(target_peer_count).await;
// Collect all the trusted peers
let mut peer_manager =
build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await;

// Create peers based on the randomly generated conditions.
for condition in &peer_conditions {
let peer = PeerId::random();
let mut attnets = crate::types::EnrAttestationBitfield::<E>::new();
let mut syncnets = crate::types::EnrSyncCommitteeBitfield::<E>::new();

if condition.outgoing {
peer_manager.inject_connect_outgoing(
&peer,
&condition.peer_id,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
} else {
peer_manager.inject_connect_ingoing(
&peer,
&condition.peer_id,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
@ -2237,22 +2293,51 @@ mod tests {
};

let mut peer_db = peer_manager.network_globals.peers.write();
let peer_info = peer_db.peer_info_mut(&peer).unwrap();
let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap();
peer_info.set_meta_data(MetaData::V2(metadata));
peer_info.set_gossipsub_score(condition.gossipsub_score);
peer_info.add_to_score(condition.score);

for subnet in peer_info.long_lived_subnets() {
peer_db.add_subscription(&peer, subnet);
peer_db.add_subscription(&condition.peer_id, subnet);
}
}

// Perform the heartbeat.
peer_manager.heartbeat();

TestResult::from_bool(
// The minimum number of connected peers cannot be less than the target peer count
// or submitted peers.

let expected_peer_count = target_peer_count.min(peer_conditions.len());
// Trusted peers could make this larger however.
let no_of_trusted_peers = peer_conditions
.iter()
.filter(|condition| condition.trusted)
.count();
let expected_peer_count = expected_peer_count.max(no_of_trusted_peers);

let target_peer_condition =
peer_manager.network_globals.connected_or_dialing_peers()
== target_peer_count.min(peer_conditions.len()),
== expected_peer_count;

// It could be that we reach our target outbound limit and are unable to prune any
// extra, which violates the target_peer_condition.
let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers();
let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers();

// No trusted peers should be disconnected
let trusted_peer_disconnected = peer_conditions.iter().any(|condition| {
condition.trusted
&& !peer_manager
.network_globals
.peers
.read()
.is_connected(&condition.peer_id)
});

TestResult::from_bool(
(target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected,
)
})
}
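The invariant this property test asserts can be stated compactly. This standalone sketch (a hypothetical helper, not part of the diff) shows how trusted peers can push the expected count above the usual min(target, submitted) bound:

// Expected connected-peer count after a heartbeat, per the property test above.
fn expected_peer_count(target: usize, submitted: usize, trusted: usize) -> usize {
    target.min(submitted).max(trusted)
}

fn main() {
    assert_eq!(expected_peer_count(3, 6, 1), 3); // the trusted peer fits within the target
    assert_eq!(expected_peer_count(3, 6, 5), 5); // trusted peers exceed the target
}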
@ -1062,7 +1062,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
if let Some(to_drop) = self
.peers
.iter()
.filter(|(_, info)| info.is_disconnected())
.filter(|(_, info)| info.is_disconnected() && !info.is_trusted())
.filter_map(|(id, info)| match info.connection_status() {
PeerConnectionStatus::Disconnected { since } => Some((id, since)),
_ => None,
@ -129,7 +129,10 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
}

/// TESTING ONLY. Build a dummy NetworkGlobals instance.
pub fn new_test_globals(log: &slog::Logger) -> NetworkGlobals<TSpec> {
pub fn new_test_globals(
trusted_peers: Vec<PeerId>,
log: &slog::Logger,
) -> NetworkGlobals<TSpec> {
use crate::CombinedKeyExt;
let keypair = libp2p::identity::Keypair::generate_secp256k1();
let enr_key: discv5::enr::CombinedKey =
@ -144,7 +147,7 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
attnets: Default::default(),
syncnets: Default::default(),
}),
vec![],
trusted_peers,
false,
log,
)
@ -13,7 +13,7 @@ pub enum SyncState {
/// The node is undertaking a backfill sync. This occurs when a user has specified a trusted
/// state. The node first syncs "forward" by downloading blocks up to the current head as
/// specified by its peers. Once completed, the node enters this sync state and attempts to
/// download all required historical blocks to complete its chain.
/// download all required historical blocks.
BackFillSyncing { completed: usize, remaining: usize },
/// The node has completed syncing a finalized chain and is in the process of re-evaluating
/// which sync state to progress to.
@ -126,36 +126,6 @@ pub fn get_enr(node: &LibP2PService<ReqId, E>) -> Enr {
node.local_enr()
}

// Returns `n` libp2p peers in fully connected topology.
#[allow(dead_code)]
/*
pub async fn build_full_mesh(
rt: Weak<Runtime>,
log: slog::Logger,
n: usize,
fork_name: ForkName,
) -> Vec<Libp2pInstance> {
let mut nodes = Vec::with_capacity(n);
for _ in 0..n {
nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await);
}
let multiaddrs: Vec<Multiaddr> = nodes
.iter()
.map(|x| get_enr(x).multiaddr()[1].clone())
.collect();

for (i, node) in nodes.iter_mut().enumerate().take(n) {
for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
if i != j {
match libp2p::Swarm::dial(&mut node.swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Connected"),
Err(_) => error!(log, "Failed to connect"),
};
}
}
}
nodes
}*/
// Constructs a pair of nodes with separate loggers. The sender dials the receiver.
// This returns a (sender, receiver) pair.
#[allow(dead_code)]
@ -1,171 +0,0 @@
/* These are temporarily disabled due to their non-deterministic behaviour and impending update to
* gossipsub 1.1. We leave these here as a template for future test upgrades


#![cfg(test)]
use crate::types::GossipEncoding;
use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock};
use lighthouse_network::*;
use slog::{debug, Level};

type E = MinimalEthSpec;

mod common;

/* Gossipsub tests */
// Note: The aim of these tests is not to test the robustness of the gossip network
// but to check if the gossipsub implementation is behaving according to the specifications.

// Test if gossipsub message are forwarded by nodes with a simple linear topology.
//
// Topology used in test
//
// node1 <-> node2 <-> node3 ..... <-> node(n-1) <-> node(n)

#[tokio::test]
async fn test_gossipsub_forward() {
// set up the logging. The level and enabled or not
let log = common::build_log(Level::Info, false);

let num_nodes = 20;
let mut nodes = common::build_linear(log.clone(), num_nodes);
let mut received_count = 0;
let spec = E::default_spec();
let empty_block = BeaconBlock::empty(&spec);
let signed_block = SignedBeaconBlock {
message: empty_block,
signature: Signature::empty_signature(),
};
let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block));
let publishing_topic: String = pubsub_message
.topics(GossipEncoding::default(), [0, 0, 0, 0])
.first()
.unwrap()
.clone()
.into();
let mut subscribed_count = 0;
let fut = async move {
for node in nodes.iter_mut() {
loop {
match node.next_event().await {
Libp2pEvent::Behaviour(b) => match b {
BehaviourEvent::PubsubMessage {
topics,
message,
source,
id,
} => {
assert_eq!(topics.len(), 1);
// Assert topic is the published topic
assert_eq!(
topics.first().unwrap(),
&TopicHash::from_raw(publishing_topic.clone())
);
// Assert message received is the correct one
assert_eq!(message, pubsub_message.clone());
received_count += 1;
// Since `propagate_message` is false, need to propagate manually
node.swarm.propagate_message(&source, id);
// Test should succeed if all nodes except the publisher receive the message
if received_count == num_nodes - 1 {
debug!(log.clone(), "Received message at {} nodes", num_nodes - 1);
return;
}
}
BehaviourEvent::PeerSubscribed(_, topic) => {
// Publish on beacon block topic
if topic == TopicHash::from_raw(publishing_topic.clone()) {
subscribed_count += 1;
// Every node except the corner nodes are connected to 2 nodes.
if subscribed_count == (num_nodes * 2) - 2 {
node.swarm.publish(vec![pubsub_message.clone()]);
}
}
}
_ => break,
},
_ => break,
}
}
}
};

tokio::select! {
_ = fut => {}
_ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => {
panic!("Future timed out");
}
}
}

// Test publishing of a message with a full mesh for the topic
// Not very useful but this is the bare minimum functionality.
#[tokio::test]
async fn test_gossipsub_full_mesh_publish() {
// set up the logging. The level and enabled or not
let log = common::build_log(Level::Debug, false);

// Note: This test does not propagate gossipsub messages.
// Having `num_nodes` > `mesh_n_high` may give inconsistent results
// as nodes may get pruned out of the mesh before the gossipsub message
// is published to them.
let num_nodes = 12;
let mut nodes = common::build_full_mesh(log, num_nodes);
let mut publishing_node = nodes.pop().unwrap();
let spec = E::default_spec();
let empty_block = BeaconBlock::empty(&spec);
let signed_block = SignedBeaconBlock {
message: empty_block,
signature: Signature::empty_signature(),
};
let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block));
let publishing_topic: String = pubsub_message
.topics(GossipEncoding::default(), [0, 0, 0, 0])
.first()
.unwrap()
.clone()
.into();
let mut subscribed_count = 0;
let mut received_count = 0;
let fut = async move {
for node in nodes.iter_mut() {
while let Libp2pEvent::Behaviour(BehaviourEvent::PubsubMessage {
topics,
message,
..
}) = node.next_event().await
{
assert_eq!(topics.len(), 1);
// Assert topic is the published topic
assert_eq!(
topics.first().unwrap(),
&TopicHash::from_raw(publishing_topic.clone())
);
// Assert message received is the correct one
assert_eq!(message, pubsub_message.clone());
received_count += 1;
if received_count == num_nodes - 1 {
return;
}
}
}
while let Libp2pEvent::Behaviour(BehaviourEvent::PeerSubscribed(_, topic)) =
publishing_node.next_event().await
{
// Publish on beacon block topic
if topic == TopicHash::from_raw(publishing_topic.clone()) {
subscribed_count += 1;
if subscribed_count == num_nodes - 1 {
publishing_node.swarm.publish(vec![pubsub_message.clone()]);
}
}
}
};
tokio::select! {
_ = fut => {}
_ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => {
panic!("Future timed out");
}
}
}
*/
@ -21,8 +21,8 @@ types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" }
slog = { version = "2.5.2", features = ["max_level_trace"] }
hex = "0.4.2"
eth2_ssz = "0.4.1"
eth2_ssz_types = "0.2.2"
ethereum_ssz = "0.5.0"
ssz_types = "0.5.0"
futures = "0.3.7"
error-chain = "0.12.4"
tokio = { version = "1.14.0", features = ["full"] }
@ -35,7 +35,7 @@ lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
logging = { path = "../../common/logging" }
task_executor = { path = "../../common/task_executor" }
igd = "0.11.1"
igd = "0.12.1"
itertools = "0.10.0"
num_cpus = "1.13.0"
lru_cache = { path = "../../common/lru_cache" }
@ -56,6 +56,7 @@ use logging::TimeLatch;
use slog::{crit, debug, error, trace, warn, Logger};
use std::collections::VecDeque;
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::{Arc, Weak};
use std::task::Context;
@ -1069,6 +1070,13 @@ impl<T: BeaconChainTypes> Stream for InboundEvents<T> {
}
}

/// Defines if and where we will store the SSZ files of invalid blocks.
#[derive(Clone)]
pub enum InvalidBlockStorage {
Enabled(PathBuf),
Disabled,
}
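A small standalone sketch of how this enum is meant to be consumed (the `storage_dir` helper is hypothetical; the real call sites appear in the worker hunks below):

use std::path::PathBuf;

#[derive(Clone)]
pub enum InvalidBlockStorage {
    Enabled(PathBuf),
    Disabled,
}

// Hypothetical helper: storage is a no-op unless a directory was configured.
fn storage_dir(storage: &InvalidBlockStorage) -> Option<&PathBuf> {
    match storage {
        InvalidBlockStorage::Enabled(dir) => Some(dir),
        InvalidBlockStorage::Disabled => None,
    }
}

fn main() {
    assert!(storage_dir(&InvalidBlockStorage::Disabled).is_none());
    let enabled = InvalidBlockStorage::Enabled(PathBuf::from("/tmp/invalid_blocks"));
    assert!(storage_dir(&enabled).is_some());
}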

/// A multi-threaded processor for messages received on the network
/// that need to be processed by the `BeaconChain`
///
@ -1082,6 +1090,7 @@ pub struct BeaconProcessor<T: BeaconChainTypes> {
pub max_workers: usize,
pub current_workers: usize,
pub importing_blocks: DuplicateCache,
pub invalid_block_storage: InvalidBlockStorage,
pub log: Logger,
}

@ -1783,19 +1792,23 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
peer_client,
block,
seen_timestamp,
} => task_spawner.spawn_async(async move {
worker
.process_gossip_block(
message_id,
peer_id,
peer_client,
block.into(),
work_reprocessing_tx,
duplicate_cache,
seen_timestamp,
)
.await
}),
} => {
let invalid_block_storage = self.invalid_block_storage.clone();
task_spawner.spawn_async(async move {
worker
.process_gossip_block(
message_id,
peer_id,
peer_client,
block.into(),
work_reprocessing_tx,
duplicate_cache,
invalid_block_storage,
seen_timestamp,
)
.await
})
}
/*
* Verification for blobs sidecars received on gossip.
*/
@ -1825,12 +1838,16 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
peer_id,
block,
seen_timestamp,
} => task_spawner.spawn_async(worker.process_gossip_verified_block(
peer_id,
*block,
work_reprocessing_tx,
seen_timestamp,
)),
} => {
let invalid_block_storage = self.invalid_block_storage.clone();
task_spawner.spawn_async(worker.process_gossip_verified_block(
peer_id,
*block,
work_reprocessing_tx,
invalid_block_storage,
seen_timestamp,
))
}
/*
* Voluntary exits received on gossip.
*/
@ -203,6 +203,7 @@ impl TestRig {
max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0,
importing_blocks: duplicate_cache.clone(),
invalid_block_storage: InvalidBlockStorage::Disabled,
log: log.clone(),
}
.spawn_manager(beacon_processor_rx, Some(work_journal_tx));
@ -14,17 +14,20 @@ use beacon_chain::{
};
use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
use operation_pool::ReceivedPreCapella;
use slog::{crit, debug, error, info, trace, warn};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use std::fs;
use std::io::Write;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc;
use types::{
Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBlobSidecar,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit,
Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId,
};

use super::{
@ -34,7 +37,7 @@ use super::{
},
Worker,
};
use crate::beacon_processor::DuplicateCache;
use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage};

/// Set to `true` to introduce stricter penalties for peers who send some types of late consensus
/// messages.
@ -796,6 +799,7 @@ impl<T: BeaconChainTypes> Worker<T> {
block: BlockWrapper<T::EthSpec>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
duplicate_cache: DuplicateCache,
invalid_block_storage: InvalidBlockStorage,
seen_duration: Duration,
) {
if let Some(gossip_verified_block) = self
@ -816,6 +820,7 @@ impl<T: BeaconChainTypes> Worker<T> {
peer_id,
gossip_verified_block,
reprocess_tx,
invalid_block_storage,
seen_duration,
)
.await;
@ -1082,13 +1087,14 @@ impl<T: BeaconChainTypes> Worker<T> {
peer_id: PeerId,
verified_block: GossipVerifiedBlock<T>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
invalid_block_storage: InvalidBlockStorage,
// This value is not used presently, but it might come in handy for debugging.
_seen_duration: Duration,
) {
let block = verified_block.block.block_cloned();
let block_root = verified_block.block_root;

match self
let result = self
.chain
.process_block(
block_root,
@ -1096,14 +1102,15 @@ impl<T: BeaconChainTypes> Worker<T> {
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.await
{
.await;

match &result {
Ok(AvailabilityProcessingStatus::Imported(block_root)) => {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);

if reprocess_tx
.try_send(ReprocessQueueMessage::BlockImported {
block_root,
block_root: *block_root,
parent_root: block.message().parent_root(),
})
.is_err()
@ -1137,7 +1144,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// make rpc request for blob
self.send_sync_message(SyncMessage::UnknownBlobHash {
peer_id,
pending_blobs,
pending_blobs: pending_blobs.to_vec(),
});
}
Err(BlockError::AvailabilityCheck(_)) => {
@ -1151,7 +1158,11 @@ impl<T: BeaconChainTypes> Worker<T> {
"Block with unknown parent attempted to be processed";
"peer_id" => %peer_id
);
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
self.send_sync_message(SyncMessage::UnknownBlock(
peer_id,
block.clone(),
block_root,
));
}
Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => {
debug!(
@ -1180,6 +1191,16 @@ impl<T: BeaconChainTypes> Worker<T> {
);
}
};

if let Err(e) = &result {
self.maybe_store_invalid_block(
&invalid_block_storage,
block_root,
&block,
e,
&self.log,
);
}
}
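The shape of this refactor, stripped of Lighthouse types, is: bind the processing result so it can be matched by reference for logging and then inspected a second time for the invalid-block dump. A minimal sketch with a stand-in `process` function:

fn process(input: &str) -> Result<u32, String> {
    input.parse::<u32>().map_err(|e| e.to_string())
}

fn handle(input: &str) {
    // Bind the result instead of matching on the expression directly...
    let result = process(input);
    match &result {
        Ok(n) => println!("imported {n}"),
        Err(e) => println!("rejected: {e}"),
    }
    // ...so it can be reused afterwards for the error side channel.
    if let Err(e) = &result {
        eprintln!("storing invalid input {input:?}: {e}");
    }
}

fn main() {
    handle("42");
    handle("not-a-number");
}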

pub fn process_gossip_voluntary_exit(
@ -2487,6 +2508,25 @@ impl<T: BeaconChainTypes> Worker<T> {
"peer_id" => %peer_id,
"type" => ?message_type,
);

// Do not penalize the peer.

self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);

return;
}
SyncCommitteeError::PriorSyncContributionMessageKnown { .. } => {
/*
* We have already seen a sync contribution message from this validator for this epoch.
*
* The peer is not necessarily faulty.
*/
debug!(
self.log,
"Prior sync contribution message known";
"peer_id" => %peer_id,
"type" => ?message_type,
);
// We still penalize the peer slightly. We don't want this to be a recurring
// behaviour.
self.gossip_penalize_peer(
@ -2651,4 +2691,62 @@ impl<T: BeaconChainTypes> Worker<T> {

self.propagate_if_timely(is_timely, message_id, peer_id)
}

/// Stores a block as an SSZ file, if and where `invalid_block_storage` dictates.
fn maybe_store_invalid_block(
&self,
invalid_block_storage: &InvalidBlockStorage,
block_root: Hash256,
block: &SignedBeaconBlock<T::EthSpec>,
error: &BlockError<T::EthSpec>,
log: &Logger,
) {
if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage {
let block_path = base_dir.join(format!("{}_{:?}.ssz", block.slot(), block_root));
let error_path = base_dir.join(format!("{}_{:?}.error", block.slot(), block_root));

let write_file = |path: PathBuf, bytes: &[u8]| {
// No need to write the same file twice. For the error file,
// this means that we'll remember the first error message but
// forget the rest.
if path.exists() {
return;
}

// Write to the file.
let write_result = fs::OpenOptions::new()
// Only succeed if the file doesn't already exist. We should
// have checked for this earlier.
.create_new(true)
.write(true)
.open(&path)
.map_err(|e| format!("Failed to open file: {:?}", e))
.map(|mut file| {
file.write_all(bytes)
.map_err(|e| format!("Failed to write file: {:?}", e))
});
if let Err(e) = write_result {
error!(
log,
"Failed to store invalid block/error";
"error" => e,
"path" => ?path,
"root" => ?block_root,
"slot" => block.slot(),
)
} else {
info!(
log,
"Stored invalid block/error";
"path" => ?path,
"root" => ?block_root,
"slot" => block.slot(),
)
}
};

write_file(block_path, &block.as_ssz_bytes());
write_file(error_path, error.to_string().as_bytes());
}
}
}
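The `create_new(true)` flag is what gives `write_file` its write-once behaviour: the open fails if the file already exists, so the first stored block/error for a given root is never overwritten. A minimal standalone sketch of the same pattern:

use std::fs;
use std::io::Write;
use std::path::Path;

// Fails with `AlreadyExists` on the second call for the same path.
fn write_once(path: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let mut file = fs::OpenOptions::new()
        .create_new(true)
        .write(true)
        .open(path)?;
    file.write_all(bytes)
}

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("write_once_demo.bin");
    let _ = fs::remove_file(&path); // clean slate for the demo
    write_once(&path, b"first")?;
    assert!(write_once(&path, b"second").is_err());
    Ok(())
}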
@ -116,10 +116,26 @@ impl<T: BeaconChainTypes> Worker<T> {
}
};

// Returns `true` if the block is already known to fork choice. Notably,
// this will return `false` for blocks that we've already imported but that are
// ancestors of the finalized checkpoint. That should not be an issue
// for our use here since finalized blocks will always be late and won't
// be requeued anyway.
let block_is_already_known = || {
self.chain
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
};

// If we've already seen a block from this proposer *and* the block
// arrived before the attestation deadline, requeue it to ensure it is
// imported late enough that it won't receive a proposer boost.
if !block_is_late && proposal_already_known() {
//
// Don't requeue blocks if they're already known to fork choice, just
// push them through to block processing so they can be handled through
// the normal channels.
if !block_is_late && proposal_already_known() && !block_is_already_known() {
debug!(
self.log,
"Delaying processing of duplicate RPC block";
@ -6,7 +6,7 @@
#![allow(clippy::unit_arg)]

use crate::beacon_processor::{
BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN,
BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN,
};
use crate::error;
use crate::service::{NetworkMessage, RequestId};
@ -80,6 +80,7 @@ impl<T: BeaconChainTypes> Router<T> {
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
executor: task_executor::TaskExecutor,
invalid_block_storage: InvalidBlockStorage,
log: slog::Logger,
) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
let message_handler_log = log.new(o!("service"=> "router"));
@ -111,6 +112,7 @@ impl<T: BeaconChainTypes> Router<T> {
max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0,
importing_blocks: Default::default(),
invalid_block_storage,
log: log.clone(),
}
.spawn_manager(beacon_processor_receive, None);
@ -1,4 +1,5 @@
use super::sync::manager::RequestId as SyncId;
use crate::beacon_processor::InvalidBlockStorage;
use crate::persisted_dht::{clear_dht, load_dht, persist_dht};
use crate::router::{Router, RouterMessage};
use crate::subnet_service::SyncCommitteeService;
@ -13,6 +14,7 @@ use futures::future::OptionFuture;
use futures::prelude::*;
use futures::StreamExt;
use lighthouse_network::service::Network;
use lighthouse_network::types::GossipKind;
use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance};
use lighthouse_network::{
rpc::{GoodbyeReason, RPCResponseErrorCode},
@ -23,7 +25,7 @@ use lighthouse_network::{
MessageId, NetworkEvent, NetworkGlobals, PeerId,
};
use slog::{crit, debug, error, info, o, trace, warn};
use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration};
use std::{collections::HashSet, net::SocketAddr, pin::Pin, sync::Arc, time::Duration};
use store::HotColdDB;
use strum::IntoStaticStr;
use task_executor::ShutdownReason;
@ -294,6 +296,12 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}
}

let invalid_block_storage = config
.invalid_block_storage
.clone()
.map(InvalidBlockStorage::Enabled)
.unwrap_or(InvalidBlockStorage::Disabled);

// launch derived network services

// router task
@ -302,6 +310,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
network_globals.clone(),
network_senders.network_send(),
executor.clone(),
invalid_block_storage,
network_log.clone(),
)?;

@ -672,6 +681,10 @@ impl<T: BeaconChainTypes> NetworkService<T> {
source,
} => self.libp2p.goodbye_peer(&peer_id, reason, source),
NetworkMessage::SubscribeCoreTopics => {
if self.subscribed_core_topics() {
return;
}

if self.shutdown_after_sync {
if let Err(e) = shutdown_sender
.send(ShutdownReason::Success(
@ -912,6 +925,16 @@ impl<T: BeaconChainTypes> NetworkService<T> {
crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id);
}
}

fn subscribed_core_topics(&self) -> bool {
let core_topics = core_topics_to_subscribe::<T::EthSpec>(self.fork_context.current_fork());
let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics);
let subscriptions = self.network_globals.gossipsub_subscriptions.read();
let subscribed_topics: HashSet<&GossipKind> =
subscriptions.iter().map(|topic| topic.kind()).collect();

core_topics.is_subset(&subscribed_topics)
}
}
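The check in `subscribed_core_topics` is plain `HashSet` subset logic; the topic names below are illustrative placeholders, not the real `GossipKind` values:

use std::collections::HashSet;

fn main() {
    let core: HashSet<&str> = ["blocks", "aggregates"].into_iter().collect();
    let subscribed: HashSet<&str> = ["blocks", "aggregates", "exits"].into_iter().collect();
    // Subscribed to everything required (extras are fine), so this holds.
    assert!(core.is_subset(&subscribed));
}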

/// Returns a `Sleep` that triggers after the next change in the beacon chain fork version.
@ -112,6 +112,9 @@ pub struct AttestationService<T: BeaconChainTypes> {
#[cfg(feature = "deterministic_long_lived_attnets")]
next_long_lived_subscription_event: Pin<Box<tokio::time::Sleep>>,

/// Whether this node is a block proposer-only node.
proposer_only: bool,

/// The logger for the attestation service.
log: slog::Logger,
}
@ -155,6 +158,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
known_validators: HashSetDelay::new(last_seen_val_timeout),
waker: None,
discovery_disabled: config.disable_discovery,
proposer_only: config.proposer_only,
subscribe_all_subnets: config.subscribe_all_subnets,
long_lived_subnet_subscription_slots,
log,
@ -256,6 +260,11 @@ impl<T: BeaconChainTypes> AttestationService<T> {
&mut self,
subscriptions: Vec<ValidatorSubscription>,
) -> Result<(), String> {
// If the node is in a proposer-only state, we ignore all subnet subscriptions.
if self.proposer_only {
return Ok(());
}

// Maps each subnet_id subscription to its highest slot
let mut subnets_to_discover: HashMap<SubnetId, Slot> = HashMap::new();
for subscription in subscriptions {
@ -450,6 +459,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
subnet: SubnetId,
attestation: &Attestation<T::EthSpec>,
) -> bool {
// Proposer-only mode does not need to process attestations
if self.proposer_only {
return false;
}
self.aggregate_validators_on_subnet
.as_ref()
.map(|tracked_vals| {
@ -54,6 +54,9 @@ pub struct SyncCommitteeService<T: BeaconChainTypes> {
/// We are always subscribed to all subnets.
subscribe_all_subnets: bool,

/// Whether this node is a block proposer-only node.
proposer_only: bool,

/// The logger for the attestation service.
log: slog::Logger,
}
@ -82,6 +85,7 @@ impl<T: BeaconChainTypes> SyncCommitteeService<T> {
waker: None,
subscribe_all_subnets: config.subscribe_all_subnets,
discovery_disabled: config.disable_discovery,
proposer_only: config.proposer_only,
log,
}
}
@ -110,6 +114,11 @@ impl<T: BeaconChainTypes> SyncCommitteeService<T> {
&mut self,
subscriptions: Vec<SyncCommitteeSubscription>,
) -> Result<(), String> {
// A proposer-only node does not subscribe to any sync-committees
if self.proposer_only {
return Ok(());
}

let mut subnets_to_discover = Vec::new();
for subscription in subscriptions {
metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS);
@ -160,20 +160,20 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
// If, for some reason, a backfill has already been completed (or we've used a trusted
// genesis root) then backfill has been completed.

let (state, current_start) = if let Some(anchor_info) = beacon_chain.store.get_anchor_info()
{
if anchor_info.block_backfill_complete() {
(BackFillState::Completed, Epoch::new(0))
} else {
(
BackFillState::Paused,
anchor_info
.oldest_block_slot
.epoch(T::EthSpec::slots_per_epoch()),
)
let (state, current_start) = match beacon_chain.store.get_anchor_info() {
Some(anchor_info) => {
if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) {
(BackFillState::Completed, Epoch::new(0))
} else {
(
BackFillState::Paused,
anchor_info
.oldest_block_slot
.epoch(T::EthSpec::slots_per_epoch()),
)
}
}
} else {
(BackFillState::NotRequired, Epoch::new(0))
None => (BackFillState::NotRequired, Epoch::new(0)),
};

let bfs = BackFillSync {
@ -288,6 +288,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
remaining: self
.current_start
.start_slot(T::EthSpec::slots_per_epoch())
.saturating_sub(self.beacon_chain.genesis_backfill_slot)
.as_usize(),
})
}
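The `remaining` figure reported above is just the saturating distance from the current start down to the genesis backfill slot. A standalone sketch, with plain integers standing in for `Slot`:

fn remaining_slots(current_start_slot: u64, genesis_backfill_slot: u64) -> u64 {
    current_start_slot.saturating_sub(genesis_backfill_slot)
}

fn main() {
    assert_eq!(remaining_slots(1000, 800), 200); // 200 slots left to backfill
    assert_eq!(remaining_slots(800, 1000), 0); // already past the target
}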
@ -1096,7 +1097,12 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
match self.batches.entry(batch_id) {
Entry::Occupied(_) => {
// this batch doesn't need downloading, let this same function decide the next batch
if batch_id == 0 {
if batch_id
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
self.last_batch_downloaded = true;
}

@ -1112,7 +1118,12 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
BACKFILL_EPOCHS_PER_BATCH,
batch_type,
));
if batch_id == 0 {
if batch_id
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
self.last_batch_downloaded = true;
}
self.to_be_downloaded = self
@ -1129,7 +1140,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
/// not required.
fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> {
if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() {
if anchor_info.block_backfill_complete() {
if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) {
Err(ResetEpochError::SyncCompleted)
} else {
self.current_start = anchor_info
@ -1144,12 +1155,17 @@ impl<T: BeaconChainTypes> BackFillSync<T> {

/// Checks with the beacon chain if backfill sync has completed.
fn check_completed(&mut self) -> bool {
if self.current_start == 0 {
if self.current_start
== self
.beacon_chain
.genesis_backfill_slot
.epoch(T::EthSpec::slots_per_epoch())
{
// Check that the beacon chain agrees

if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() {
// Conditions that we have completed a backfill sync
if anchor_info.block_backfill_complete() {
if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) {
return true;
} else {
error!(self.log, "Backfill out of sync with beacon chain");
@ -56,7 +56,7 @@ impl TestRig {
};
let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups")));
let cx = {
let globals = Arc::new(NetworkGlobals::new_test_globals(&log));
let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
SyncNetworkContext::new(
network_tx,
globals,
@ -599,7 +599,7 @@ mod tests {
log.new(o!("component" => "range")),
);
let (network_tx, network_rx) = mpsc::unbounded_channel();
let globals = Arc::new(NetworkGlobals::new_test_globals(&log));
let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log));
let cx = SyncNetworkContext::new(
network_tx,
globals.clone(),
@ -12,8 +12,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
parking_lot = "0.12.0"
types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" }
eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.1"
ethereum_ssz = "0.5.0"
ethereum_ssz_derive = "0.5.0"
rayon = "1.5.0"
serde = "1.0.116"
serde_derive = "1.0.116"
@ -130,7 +130,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
Arg::with_name("target-peers")
.long("target-peers")
.help("The target number of peers.")
.default_value("80")
.takes_value(true),
)
.arg(
@ -255,6 +254,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.")
.takes_value(true),
)
.arg(
Arg::with_name("genesis-backfill")
.long("genesis-backfill")
.help("Attempts to download blocks all the way back to genesis when checkpoint syncing.")
.takes_value(false),
)
.arg(
Arg::with_name("enable-private-discovery")
.long("enable-private-discovery")
@ -276,6 +281,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.min_values(0)
.hidden(true)
)
.arg(
Arg::with_name("proposer-only")
.long("proposer-only")
.help("Sets this beacon node to be a block proposer-only node. \
This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \
for a beacon node being referenced by a validator client using the --proposer-nodes flag. This configuration is for enabling more secure setups.")
.takes_value(false),
)

.arg(
Arg::with_name("disable-backfill-rate-limiting")
.long("disable-backfill-rate-limiting")
@ -518,6 +532,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.help("Specifies how many blocks the database should cache in memory [default: 5]")
.takes_value(true)
)
.arg(
Arg::with_name("historic-state-cache-size")
.long("historic-state-cache-size")
.value_name("SIZE")
.help("Specifies how many states from the freezer database should be cached in memory [default: 1]")
.takes_value(true)
)
/*
* Execution Layer Integration
*/
@ -858,7 +879,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.arg(
Arg::with_name("reconstruct-historic-states")
.long("reconstruct-historic-states")
.help("After a checkpoint sync, reconstruct historic states in the database.")
.help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.")
.takes_value(false)
)
.arg(
@ -1102,7 +1123,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.long("gui")
.hidden(true)
.help("Enable the graphical user interface and all its requirements. \
This is equivalent to --http and --validator-monitor-auto.")
This enables --http and --validator-monitor-auto and enables SSE logging.")
.takes_value(false)
)
.arg(
@ -1114,4 +1135,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
// always using the builder.
.conflicts_with("builder-profit-threshold")
)
.arg(
Arg::with_name("invalid-gossip-verified-blocks-path")
.long("invalid-gossip-verified-blocks-path")
.value_name("PATH")
.help("If a block succeeds gossip validation whilst failing full validation, store \
the block SSZ as a file at this path. This feature is only recommended for \
developers. This directory is not pruned, users should be careful to avoid \
filling up their disks.")
)
}
@ -404,6 +404,12 @@ pub fn get_config<E: EthSpec>(
.map_err(|_| "block-cache-size is not a valid integer".to_string())?;
}

if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") {
client_config.store.historic_state_cache_size = historic_state_cache_size
.parse()
.map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?;
}

client_config.store.compact_on_init = cli_args.is_present("compact-db");
if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") {
client_config.store.compact_on_prune = compact_on_prune
@ -539,6 +545,7 @@ pub fn get_config<E: EthSpec>(

if cli_args.is_present("reconstruct-historic-states") {
client_config.chain.reconstruct_historic_states = true;
client_config.chain.genesis_backfill = true;
}

let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") {
@ -811,6 +818,9 @@ pub fn get_config<E: EthSpec>(
client_config.chain.optimistic_finalized_sync =
!cli_args.is_present("disable-optimistic-finalized-sync");

if cli_args.is_present("genesis-backfill") {
client_config.chain.genesis_backfill = true;
}
// Payload selection configs
if cli_args.is_present("always-prefer-builder-payload") {
client_config.always_prefer_builder_payload = true;
@ -820,6 +830,11 @@ pub fn get_config<E: EthSpec>(
client_config.chain.enable_backfill_rate_limiting =
!cli_args.is_present("disable-backfill-rate-limiting");

if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")?
{
client_config.network.invalid_block_storage = Some(path);
}

Ok(client_config)
}

@ -1016,10 +1031,13 @@ pub fn set_network_config(

config.set_listening_addr(parse_listening_addresses(cli_args, log)?);

// A custom target-peers command will overwrite the --proposer-only default.
if let Some(target_peers_str) = cli_args.value_of("target-peers") {
config.target_peers = target_peers_str
.parse::<usize>()
.map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?;
} else {
config.target_peers = 80; // default value
}

if let Some(value) = cli_args.value_of("network-load") {
@ -1078,6 +1096,9 @@ pub fn set_network_config(
.map_err(|_| format!("Invalid trusted peer id: {}", peer_id))
})
.collect::<Result<Vec<PeerIdSerialized>, _>>()?;
if config.trusted_peers.len() >= config.target_peers {
slog::warn!(log, "More trusted peers than the target peer limit. This will prevent efficient peer selection."; "target_peers" => config.target_peers, "trusted_peers" => config.trusted_peers.len());
}
}

if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") {
@ -1255,6 +1276,20 @@ pub fn set_network_config(
config.outbound_rate_limiter_config = Some(Default::default());
}

// Proposer-only mode overrides a number of previous configuration parameters.
// Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set
// of peers.
if cli_args.is_present("proposer-only") {
config.subscribe_all_subnets = false;

if cli_args.value_of("target-peers").is_none() {
// If a custom value is not set, change the default to 15
config.target_peers = 15;
}
config.proposer_only = true;
warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag");
}

Ok(())
}
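Taken together with the removal of clap's default_value("80"), the precedence for `target_peers` works out as follows. A hypothetical standalone function capturing the behaviour:

fn effective_target_peers(cli_target: Option<usize>, proposer_only: bool) -> usize {
    match (cli_target, proposer_only) {
        (Some(n), _) => n,   // an explicit --target-peers always wins
        (None, true) => 15,  // proposer-only default
        (None, false) => 80, // general default
    }
}

fn main() {
    assert_eq!(effective_target_peers(None, false), 80);
    assert_eq!(effective_target_peers(None, true), 15);
    assert_eq!(effective_target_peers(Some(100), true), 100);
}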
@ -13,8 +13,8 @@ db-key = "0.0.5"
leveldb = { version = "0.8.6", default-features = false }
parking_lot = "0.12.0"
itertools = "0.10.0"
eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.1"
ethereum_ssz = "0.5.0"
ethereum_ssz_derive = "0.5.0"
types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" }
slog = "2.5.2"
@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec};
pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192;
pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1;
pub const DEFAULT_BLOB_CACHE_SIZE: usize = 5;
pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1;
pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0;
@ -20,6 +21,8 @@ pub struct StoreConfig {
pub slots_per_restore_point_set_explicitly: bool,
/// Maximum number of blocks to store in the in-memory block cache.
pub block_cache_size: usize,
/// Maximum number of states from the freezer database to store in the in-memory state cache.
pub historic_state_cache_size: usize,
/// Maximum number of blobs to store in the in-memory blob cache.
pub blob_cache_size: usize,
/// Whether to compact the database on initialization.
@ -55,6 +58,7 @@ impl Default for StoreConfig {
slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64,
slots_per_restore_point_set_explicitly: false,
block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE,
blob_cache_size: DEFAULT_BLOB_CACHE_SIZE,
compact_on_init: false,
compact_on_prune: true,
@ -30,7 +30,7 @@ use slog::{debug, error, info, trace, warn, Logger};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::{
BlockProcessingError, BlockReplayer, SlotProcessingError, StateRootStrategy,
BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy,
};
use std::cmp::min;
use std::convert::TryInto;
@ -70,6 +70,8 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
blob_cache: Mutex<LruCache<Hash256, BlobSidecarList<E>>>,
/// LRU cache of deserialized blocks. Updated whenever a block is loaded.
block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
/// LRU cache of replayed states.
state_cache: Mutex<LruCache<Slot, BeaconState<E>>>,
/// Chain spec.
pub(crate) spec: ChainSpec,
/// Logger.
@ -143,6 +145,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
blobs_db: Some(MemoryStore::open()),
hot_db: MemoryStore::open(),
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)),
blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
config,
spec,
@ -180,6 +183,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
blobs_db: None,
hot_db: LevelDB::open(hot_path)?,
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)),
blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
config,
spec,
@ -632,10 +636,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
// chain. This way we avoid returning a state that doesn't match `state_root`.
self.load_cold_state(state_root)
} else {
self.load_hot_state(state_root, StateRootStrategy::Accurate)
self.load_hot_state(state_root, StateProcessingStrategy::Accurate)
}
} else {
match self.load_hot_state(state_root, StateRootStrategy::Accurate)? {
match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? {
Some(state) => Ok(Some(state)),
None => self.load_cold_state(state_root),
}
@ -673,7 +677,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
.into())
} else {
self.load_hot_state(state_root, StateRootStrategy::Inconsistent)
self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent)
}
}

@ -763,10 +767,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
{
// NOTE: minor inefficiency here because we load an unnecessary hot state summary
//
// `StateRootStrategy` should be irrelevant here since we never replay blocks for an epoch
// `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch
// boundary state in the hot DB.
let state = self
.load_hot_state(&epoch_boundary_state_root, StateRootStrategy::Accurate)?
.load_hot_state(
&epoch_boundary_state_root,
StateProcessingStrategy::Accurate,
)?
.ok_or(HotColdDBError::MissingEpochBoundaryState(
epoch_boundary_state_root,
))?;
@ -1026,7 +1033,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
pub fn load_hot_state(
&self,
state_root: &Hash256,
state_root_strategy: StateRootStrategy,
state_processing_strategy: StateProcessingStrategy,
) -> Result<Option<BeaconState<E>>, Error> {
metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT);

@ -1059,7 +1066,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
blocks,
slot,
no_state_root_iter(),
state_root_strategy,
state_processing_strategy,
)?
};

@ -1173,40 +1180,70 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>

/// Load a frozen state that lies between restore points.
fn load_cold_intermediate_state(&self, slot: Slot) -> Result<BeaconState<E>, Error> {
if let Some(state) = self.state_cache.lock().get(&slot) {
return Ok(state.clone());
}

// 1. Load the restore points either side of the intermediate state.
let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point;
let high_restore_point_idx = low_restore_point_idx + 1;

// Use low restore point as the base state.
let mut low_slot: Slot =
Slot::new(low_restore_point_idx * self.config.slots_per_restore_point);
let mut low_state: Option<BeaconState<E>> = None;

// Try to get a more recent state from the cache to avoid replaying a large number of blocks.
for (s, state) in self.state_cache.lock().iter() {
if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx
&& *s < slot
&& low_slot < *s
{
low_slot = *s;
low_state = Some(state.clone());
}
}

// If low_state is still None, use load_restore_point_by_index to load the state.
let low_state = match low_state {
Some(state) => state,
None => self.load_restore_point_by_index(low_restore_point_idx)?,
};

// Acquire the read lock, so that the split can't change while this is happening.
let split = self.split.read_recursive();

let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?;
let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?;

// 2. Load the blocks from the high restore point back to the low restore point.
// 2. Load the blocks from the high restore point back to the low point.
let blocks = self.load_blocks_to_replay(
low_restore_point.slot(),
low_slot,
slot,
self.get_high_restore_point_block_root(&high_restore_point, slot)?,
)?;

// 3. Replay the blocks on top of the low restore point.
// 3. Replay the blocks on top of the low point.
// Use a forwards state root iterator to avoid doing any tree hashing.
// The state root of the high restore point should never be used, so is safely set to 0.
let state_root_iter = self.forwards_state_roots_iterator_until(
low_restore_point.slot(),
low_slot,
slot,
|| Ok((high_restore_point, Hash256::zero())),
&self.spec,
)?;

self.replay_blocks(
low_restore_point,
let state = self.replay_blocks(
low_state,
blocks,
slot,
Some(state_root_iter),
StateRootStrategy::Accurate,
)
StateProcessingStrategy::Accurate,
)?;

// If replay succeeded, put the state in the cache.
self.state_cache.lock().put(slot, state.clone());

Ok(state)
}
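The cache probe added to `load_cold_intermediate_state` picks the most recent cached state that is below the requested slot but still within the same restore-point window, so replay starts as late as possible. A simplified sketch of that selection, using a `BTreeMap` as a stand-in cache:

use std::collections::BTreeMap;

// Returns the best slot to start replaying from: the greatest cached slot in
// [low_restore_point, slot), falling back to the restore point itself.
fn best_low_slot(cache: &BTreeMap<u64, ()>, slot: u64, slots_per_restore_point: u64) -> u64 {
    let low = (slot / slots_per_restore_point) * slots_per_restore_point;
    cache
        .range(low..slot)
        .next_back()
        .map(|(&s, _)| s)
        .unwrap_or(low)
}

fn main() {
    let mut cache = BTreeMap::new();
    cache.insert(8200, ());
    // Restore points every 8192 slots; the cached state at 8200 shortens the replay.
    assert_eq!(best_low_slot(&cache, 8300, 8192), 8200);
    // Nothing cached in the window below 16_000, so fall back to the restore point.
    assert_eq!(best_low_slot(&BTreeMap::new(), 16_000, 8192), 8192);
}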

/// Get the restore point with the given index, or if it is out of bounds, the split state.
@ -1292,10 +1329,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
blocks: Vec<SignedBeaconBlock<E, BlindedPayload<E>>>,
target_slot: Slot,
state_root_iter: Option<impl Iterator<Item = Result<(Hash256, Slot), Error>>>,
state_root_strategy: StateRootStrategy,
state_processing_strategy: StateProcessingStrategy,
) -> Result<BeaconState<E>, Error> {
let mut block_replayer = BlockReplayer::new(state, &self.spec)
.state_root_strategy(state_root_strategy)
.state_processing_strategy(state_processing_strategy)
.no_signature_verification()
.minimal_block_root_verification();
@@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use types::{Checkpoint, Hash256, Slot};

-pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(16);
+pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(17);

 // All the keys that get stored under the `BeaconMeta` column.
 //
@@ -100,8 +100,10 @@ pub struct AnchorInfo {

 impl AnchorInfo {
     /// Returns true if the block backfill has completed.
-    pub fn block_backfill_complete(&self) -> bool {
-        self.oldest_block_slot == 0
+    /// This is a comparison between the oldest block slot and the target backfill slot (which is
+    /// likely to be the closest WSP).
+    pub fn block_backfill_complete(&self, target_slot: Slot) -> bool {
+        self.oldest_block_slot <= target_slot
     }
 }

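The semantic change is easiest to see with concrete values. A simplified sketch
(the struct below is a stand-in for the real `AnchorInfo`, which carries more
fields, and a bare `u64` stands in for `Slot`):

```rust
// Simplified stand-in for AnchorInfo.
struct AnchorInfo {
    oldest_block_slot: u64,
}

impl AnchorInfo {
    /// Backfill now counts as complete once blocks reach the target slot
    /// (e.g. the closest weak subjectivity point), not necessarily genesis.
    fn block_backfill_complete(&self, target_slot: u64) -> bool {
        self.oldest_block_slot <= target_slot
    }
}

fn main() {
    let anchor = AnchorInfo { oldest_block_slot: 6_000_000 };
    // Old behaviour required oldest_block_slot == 0 (backfill to genesis).
    assert!(anchor.block_backfill_complete(6_100_000)); // target reached
    assert!(!anchor.block_backfill_complete(5_900_000)); // still backfilling
}
```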
@@ -5,7 +5,7 @@ use itertools::{process_results, Itertools};
 use slog::info;
 use state_processing::{
     per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext,
-    VerifyBlockRoot,
+    StateProcessingStrategy, VerifyBlockRoot,
 };
 use std::sync::Arc;
 use types::{EthSpec, Hash256};
@@ -96,6 +96,7 @@ where
             &mut state,
             &block,
             BlockSignatureStrategy::NoVerification,
+            StateProcessingStrategy::Accurate,
             VerifyBlockRoot::True,
             &mut ctxt,
             &self.spec,
66
book/src/LaTeX/full-withdrawal.tex
Normal file
@@ -0,0 +1,66 @@
% To compile the file to PDF, you may use the latex+dvips+ps2pdf compilation chain. If you are using TeXstudio, this is built in: go to Options > Configure TeXstudio, and under Build & View choose DVI -> PS -> PDF Chain.

% Alternatively, you may use XeLaTeX with the --shell-escape option. To do so in TeXstudio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a name of your choice, and in the empty field to the right insert: txs:///xelatex/[--shell-escape]. When compiling, go to Tools > User and select the command you just added.

\documentclass[]{article}
\usepackage{pst-all}
\pagestyle{empty}



\begin{document}


\begin{figure}
\psscalebox{1.0 1.0} % Change this value to rescale the drawing.
{
\begin{pspicture}(0,-9.09)(11.8,6.13)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](7.3,6.13)(4.2,5.21)
\rput[bl](4.6,5.51){Voluntary exit}
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.8,5.21)(5.8,3.71)(5.8,3.81)
\psline[linecolor=black, linewidth=0.04](1.7,3.61)(9.8,3.61)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,3.61)(1.7,2.61)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](2.9,2.63)(0.8,1.55)
\rput[bl](1.0,1.91){Type 0x00}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,2.63)(8.6,1.55)
\rput[bl](8.8,1.91){Type 0x01}
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,3.61)(9.8,2.61)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,1.51)(1.7,0.61)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,0.61)(0.0,-1.19)
\rput[bl](0.6,-0.19){Funds locked in}
\rput[bl](0.7,-0.79){Beacon chain}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.8,0.73)(7.9,-1.39)
\rput[bl](9.0,-0.59){Exit queue}
\rput[bl](8.8,0.01){Varying time}
\rput[bl](8.3,-1.09){32 minutes to weeks}
\rput[bl](9.0,-2.89){Fixed time}
\rput[bl](9.0,-3.49){27.3 hours}
\rput[bl](8.8,-5.49){Varying time}
\rput[bl](8.7,-5.99){validator sweep}
\rput[bl](8.9,-6.59){up to 5 days}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29)
\rput[bl](1.3,-3.29){BLS to}
\rput[bl](0.6,-3.89){execution change}
\psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.19)(1.7,-2.49)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,1.51)(9.8,0.71)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-1.39)(9.8,-2.19)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-3.89)(9.8,-4.79)
\psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm](3.7,-3.39)(5.8,-3.39)
\psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-0.39)(7.9,-0.39)
\psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(8.0,-3.39)
\psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-6.09)(7.9,-6.09)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-7.79)(7.9,-9.09)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-6.89)(9.8,-7.79)
\rput[bl](8.1,-8.59){\Large{Full withdrawal}}
\rput[bl](1.8,-2.09){\textit{\Large{anytime}}}
\rput[bl](4.0,-3.19){\textit{\Large{either}}}
\rput[bl](4.2,-3.89){\textit{\Large{one}}}
\end{pspicture}
}
\end{figure}



\end{document}
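For reference, the DVI -> PS -> PDF chain mentioned in the header comment can
also be run directly from a shell (assuming a TeX distribution that provides
`latex`, `dvips`, and `ps2pdf`):

```bash
latex full-withdrawal.tex    # produces full-withdrawal.dvi
dvips full-withdrawal.dvi    # produces full-withdrawal.ps
ps2pdf full-withdrawal.ps    # produces full-withdrawal.pdf
```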
50
book/src/LaTeX/partial-withdrawal.tex
Normal file
@@ -0,0 +1,50 @@
% To compile the file to PDF, you may use the latex+dvips+ps2pdf compilation chain. If you are using TeXstudio, this is built in: go to Options > Configure TeXstudio, and under Build & View choose DVI -> PS -> PDF Chain.

% Alternatively, you may use XeLaTeX with the --shell-escape option. To do so in TeXstudio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a name of your choice, and in the empty field to the right insert: txs:///xelatex/[--shell-escape]. When compiling, go to Tools > User and select the command you just added.


\documentclass[]{article}
\usepackage{pst-all}
\pagestyle{empty}



\begin{document}

\begin{figure}
\psscalebox{1.0 1.0} % Change this value to rescale the drawing.
{
\begin{pspicture}(0,-8.09)(10.7,5.53)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](7.14,5.53)(3.6,4.45)
\rput[bl](3.8,4.81){Partial withdrawals}
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.2,4.41)(5.2,2.91)(5.2,3.01)
\psline[linecolor=black, linewidth=0.04](1.8,2.81)(8.9,2.81)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.8,2.81)(1.8,1.81)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](2.7,1.83)(0.6,0.75)
\rput[bl](0.8,1.09){Type 0x00}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](9.8,1.83)(7.7,0.75)
\rput[bl](7.92,1.07){Type 0x01}
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,2.81)(8.9,1.81)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,0.71)(1.7,-0.19)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-0.19)(0.0,-1.99)
\rput[bl](0.66,-0.99){Funds locked in}
\rput[bl](0.9,-1.59){Beacon chain}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09)
\rput[bl](7.6,-3.99){validator sweep}
\rput[bl](7.5,-4.69){$\sim$ every 5 days}
\psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09)
\rput[bl](1.3,-4.09){BLS to}
\rput[bl](0.5,-4.69){execution change}
\psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.99)(1.7,-3.29)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,0.71)(8.9,-3.29)
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(3.7,-4.19)(6.7,-4.19)
\psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-6.29)(6.9,-8.09)
\rput[bl](7.0,-6.99){Balance above 32 ETH}
\rput[bl](7.9,-7.59){withdrawn}
\psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,-5.09)(8.9,-6.29)
\rput[bl](1.8,-2.89){\textit{\Large{anytime}}}
\end{pspicture}
}
\end{figure}

\end{document}
@@ -11,15 +11,10 @@
 * [Update Priorities](./installation-priorities.md)
 * [Run a Node](./run_a_node.md)
 * [Become a Validator](./mainnet-validator.md)
 * [Become a Testnet Validator](./testnet-validator.md)
-* [Key Management](./key-management.md)
-  * [Create a wallet](./wallet-create.md)
-  * [Create a validator](./validator-create.md)
-  * [Key recovery](./key-recovery.md)
 * [Validator Management](./validator-management.md)
   * [Importing from the Staking Launchpad](./validator-import-launchpad.md)
   * [Slashing Protection](./slashing-protection.md)
   * [Voluntary Exits](./voluntary-exit.md)
+  * [Partial Withdrawals](./partial-withdrawal.md)
   * [Validator Monitoring](./validator-monitoring.md)
   * [Doppelganger Protection](./validator-doppelganger.md)
   * [Suggested Fee Recipient](./suggested-fee-recipient.md)
@@ -34,6 +29,7 @@
 * [Prometheus Metrics](./advanced_metrics.md)
 * [Lighthouse UI (Siren)](./lighthouse-ui.md)
   * [Installation](./ui-installation.md)
+  * [Authentication](./ui-authentication.md)
   * [Configuration](./ui-configuration.md)
   * [Usage](./ui-usage.md)
   * [FAQs](./ui-faqs.md)
@@ -41,9 +37,12 @@
 * [Checkpoint Sync](./checkpoint-sync.md)
 * [Custom Data Directories](./advanced-datadir.md)
 * [Validator Graffiti](./graffiti.md)
+* [Proposer Only Beacon Nodes](./advanced-proposer-only.md)
 * [Remote Signing with Web3Signer](./validator-web3signer.md)
 * [Database Configuration](./advanced_database.md)
 * [Database Migrations](./database-migrations.md)
+* [Key Management](./key-management.md)
+* [Key Recovery](./key-recovery.md)
 * [Advanced Networking](./advanced_networking.md)
 * [Running a Slasher](./slasher.md)
 * [Redundancy](./redundancy.md)
71
book/src/advanced-proposer-only.md
Normal file
@@ -0,0 +1,71 @@
# Advanced Proposer-Only Beacon Nodes

Lighthouse allows for more exotic setups that can minimize attack vectors by
adding redundant beacon nodes and dividing the roles of attesting and block
production between them.

The purpose of this is to minimize attack vectors where malicious users obtain
the network identities (IP addresses) of beacon nodes corresponding to
individual validators and subsequently perform Denial of Service attacks on
those beacon nodes when they are due to produce a block on the network. By
splitting the duties of attestation and block production across different
beacon nodes, an attacker may not know which node is the block production
node, especially if the user rotates the IP address of the block production
beacon node between block proposals (block proposals are infrequent on
networks with large validator counts, which makes this practical).

## The Beacon Node

A Lighthouse beacon node can be configured with the `--proposer-only` flag
(i.e. `lighthouse bn --proposer-only`).
Setting a beacon node with this flag will limit its use as a beacon node for
normal activities such as performing attestations, but it will make the node
harder to identify as a potential node to attack and will also consume fewer
resources.

Specifically, this flag reduces the default peer count (to a safe minimal
number, since maintaining peers on attestation subnets no longer needs to be
considered) and prevents the node from subscribing to any attestation subnets
or sync committees, subscription to which is a primary way for attackers to
de-anonymize validators.

> Note: Beacon nodes that have set the `--proposer-only` flag should not be connected
> to validator clients unless via the `--proposer-nodes` flag. If connected as a
> normal beacon node, the validator may fail to handle its duties correctly,
> resulting in a loss of income.

## The Validator Client

The validator client can be given a list of HTTP API endpoints representing
beacon nodes that will be solely used for block propagation on the network, via
the CLI flag `--proposer-nodes`. These nodes can be any working beacon nodes
and do not specifically have to be proposer-only beacon nodes run with the
`--proposer-only` flag (although we do recommend this flag for these nodes for
added security).

> Note: The validator client still requires at least one other beacon node to
> perform its duties, which must be specified via the usual `--beacon-nodes` flag.

> Note: The validator client will attempt to get a block to propose from the
> beacon nodes specified in `--beacon-nodes` before trying `--proposer-nodes`.
> This is because the nodes subscribed to subnets have a higher chance of
> producing a more profitable block. Any block builders should therefore be
> attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`.

## Setup Overview

The intended setup to take advantage of this mechanism is to run one (or more)
normal beacon nodes in conjunction with one (or more) proposer-only beacon
nodes. See the [Redundancy](./redundancy.md) section for more information about
setting up redundant beacon nodes. The proposer-only beacon nodes should be
set up to use a different IP address than the primary (non proposer-only) nodes.
For added security, the IP addresses of the proposer-only nodes should be
rotated occasionally, such that a new IP address is used per block proposal.

A single validator client can then connect to all of the above nodes via the
`--beacon-nodes` and `--proposer-nodes` flags. The resulting setup allows the
validator client to perform its regular duties on the standard beacon nodes
and, when the time comes to propose a block, to send that block via the
specified proposer-only nodes. A minimal command-line sketch of this topology
is shown below.
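The commands below illustrate the topology just described (hostnames and ports
are placeholders; only the `--proposer-only`, `--proposer-nodes`, and
`--beacon-nodes` flags are prescribed by this page):

```bash
# Normal beacon node, used for attestations and other regular duties.
lighthouse bn --http --http-port 5052

# Proposer-only beacon node, ideally reachable via a different IP address.
lighthouse bn --proposer-only --http --http-port 5053

# Validator client: regular duties via --beacon-nodes; block proposals are
# additionally sent via the proposer-only node.
lighthouse vc \
  --beacon-nodes http://normal-node:5052 \
  --proposer-nodes http://proposer-node:5053
```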
@@ -58,6 +58,16 @@ the `--slots-per-restore-point` flag:
 lighthouse beacon_node --slots-per-restore-point 32
 ```

+### Historic state cache
+
+Lighthouse includes a cache to avoid repeatedly replaying blocks when loading historic states. Lighthouse will cache a limited number of reconstructed states and will re-use them when serving requests for subsequent states at higher slots. This greatly reduces the cost of requesting several states in order, and we recommend that applications like block explorers take advantage of this cache.
+
+The historic state cache size can be specified with the flag `--historic-state-cache-size` (default value is 1):
+
+```bash
+lighthouse beacon_node --historic-state-cache-size 4
+```
+
 ## Glossary

 * _Freezer DB_: part of the database storing finalized states. States are stored in a sparser
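As an illustration of the access pattern this cache is designed for: requesting
historic states in ascending slot order lets each request re-use the previously
cached state instead of replaying from the last restore point. A sketch using
the standard debug states API (slot values are illustrative):

```bash
# Sequential slots: after the first request, each subsequent state is cheap.
for slot in 100000 100001 100002 100003; do
  curl -s "http://localhost:5052/eth/v2/debug/beacon/states/$slot" \
    -H "accept: application/json" -o "state_$slot.json"
done
```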
@@ -72,8 +72,7 @@ specification][OpenAPI].
 Returns the block header at the head of the canonical chain.

 ```bash
-curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept:
-application/json"
+curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: application/json" | jq
 ```

 ```json
@@ -100,7 +99,7 @@ application/json"
 Shows the status of validator at index `1` at the `head` state.

 ```bash
-curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json"
+curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq
 ```

 ```json
@@ -159,8 +158,7 @@ The API is now being served at `https://localhost:5052`.

 To test connectivity, you can run the following:
 ```bash
-curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem
-
+curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq
 ```
 ### Connecting a validator client
 In order to connect a validator client to a beacon node over TLS, the validator
@@ -203,7 +201,7 @@ Ensure the `--http` flag has been supplied at the CLI.
 You can quickly check that the HTTP endpoint is up using `curl`:

 ```bash
-curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json"
+curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" | jq
 ```

 The beacon node should respond with its version:

@@ -141,7 +141,8 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic
       "attestation_head_hit_percentage": 100,
       "attestation_target_hits": 5,
       "attestation_target_misses": 5,
-      "attestation_target_hit_percentage": 50
+      "attestation_target_hit_percentage": 50,
+      "latest_attestation_inclusion_distance": 1
     }
   }
 }
@@ -455,6 +456,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq
   "config": {
     "slots_per_restore_point": 2048,
     "block_cache_size": 5,
+    "historic_state_cache_size": 1,
     "compact_on_init": false,
     "compact_on_prune": true
   },
@@ -677,3 +679,31 @@ Caveats:
 This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and
 loading a state on a boundary is most efficient.

+
+### `/lighthouse/logs`
+
+This is a Server-Sent Events subscription endpoint, which allows a user to read
+the Lighthouse logs directly from the HTTP API. It currently exposes logs at
+INFO level and higher, and is only enabled when the `--gui` flag is set in the CLI.
+
+Example:
+
+```bash
+curl -N "http://localhost:5052/lighthouse/logs"
+```
+
+This should produce output that emits log events as they occur:
+```json
+{
+  "data": {
+    "time": "Mar 13 15:28:41",
+    "level": "INFO",
+    "msg": "Syncing",
+    "service": "slot_notifier",
+    "est_time": "1 hr 27 mins",
+    "speed": "5.33 slots/sec",
+    "distance": "28141 slots (3 days 21 hrs)",
+    "peers": "8"
+  }
+}
+```
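Each event arrives on an SSE `data:` line, so one way to pretty-print the
stream (assuming GNU sed for unbuffered output, plus `jq`) is:

```bash
curl -sN "http://localhost:5052/lighthouse/logs" \
  | sed -un 's/^data: //p' \
  | jq
```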
@@ -578,3 +578,33 @@ The following fields may be omitted or nullified to obtain default values:
 ### Example Response Body

 *No data is included in the response body.*

+## `GET /lighthouse/logs`
+
+Provides a subscription to receive logs as Server-Sent Events. Currently the
+logs emitted are INFO level or higher.
+
+### HTTP Specification
+
+| Property          | Specification      |
+|-------------------|--------------------|
+| Path              | `/lighthouse/logs` |
+| Method            | GET                |
+| Required Headers  | None               |
+| Typical Responses | 200                |
+
+### Example Response Body
+
+```json
+{
+  "data": {
+    "time": "Mar 13 15:26:53",
+    "level": "INFO",
+    "msg": "Connected to beacon node(s)",
+    "service": "notifier",
+    "synced": 1,
+    "available": 1,
+    "total": 1
+  }
+}
+```
@@ -178,7 +178,7 @@ You can check that your builder is configured correctly by looking for these log
 On start-up, the beacon node will log if a builder is configured:

 ```
-INFO Connected to external block builder
+INFO Using external block builder
 ```

 At regular intervals the validator client will log that it successfully registered its validators
@@ -92,6 +92,7 @@ curl "http://localhost:5052/lighthouse/database/info"
     "slots_per_restore_point": 8192,
     "slots_per_restore_point_set_explicitly": true,
     "block_cache_size": 5,
+    "historic_state_cache_size": 1,
     "compact_on_init": false,
     "compact_on_prune": true
 }
BIN
book/src/imgs/full-withdrawal.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 257 KiB |
BIN
book/src/imgs/partial-withdrawal.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 172 KiB |
BIN
book/src/imgs/ui-autoconnect-auth.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 3.3 MiB |
BIN
book/src/imgs/ui-exit.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 507 KiB |
Some files were not shown because too many files have changed in this diff.