pub use crate::persisted_beacon_chain::PersistedBeaconChain;
pub use crate::{
    beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
    migrate::MigratorConfig,
    BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
};
use crate::{
    builder::{BeaconChainBuilder, Witness},
    eth1_chain::CachingEth1Backend,
    BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler,
    StateSkipConfig,
};
use bls::get_withdrawal_credentials;
use execution_layer::{
    auth::JwtKey,
    test_utils::{
        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET,
        DEFAULT_TERMINAL_BLOCK,
    },
    ExecutionLayer,
};
use fork_choice::CountUnrealized;
use futures::channel::mpsc::Receiver;
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32;
use merkle_proof::MerkleTree;
use parking_lot::Mutex;
use parking_lot::RwLockWriteGuard;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use rayon::prelude::*;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
    state_advance::{complete_state_advance, partial_state_advance},
    StateRootStrategy,
};
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
use task_executor::{test_utils::TestRuntime, ShutdownReason};
use tree_hash::TreeHash;
use types::sync_selection_proof::SyncSelectionProof;
pub use types::test_utils::generate_deterministic_keypairs;
use types::{typenum::U4294967296, *};

// 4th September 2019
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;

// Environment variable to read if the `fork_from_env` feature is enabled.
const FORK_NAME_ENV_VAR: &str = "FORK_NAME";

// Default target aggregators to set during testing; this ensures an aggregator at each slot.
//
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use
// a different value.
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::max_value();
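
// A minimal illustrative sketch (not part of the original file) of using a different
// aggregator count: mutate the spec before handing it to the harness builder. The
// `MinimalEthSpec` type and the value `4` are arbitrary choices for the example.
//
//     let mut spec = test_spec::<MinimalEthSpec>();
//     spec.target_aggregators_per_committee = 4;
//     // ...then pass it along via `Builder::new(MinimalEthSpec).spec(spec)`.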

pub type BaseHarnessType<TEthSpec, THotStore, TColdStore> =
    Witness<TestingSlotClock, CachingEth1Backend<TEthSpec>, TEthSpec, THotStore, TColdStore>;

pub type DiskHarnessType<E> = BaseHarnessType<E, LevelDB<E>, LevelDB<E>>;
pub type EphemeralHarnessType<E> = BaseHarnessType<E, MemoryStore<E>, MemoryStore<E>>;

pub type BoxedMutator<E, Hot, Cold> = Box<
    dyn FnOnce(
        BeaconChainBuilder<BaseHarnessType<E, Hot, Cold>>,
    ) -> BeaconChainBuilder<BaseHarnessType<E, Hot, Cold>>,
>;
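
// Illustrative only: any `FnOnce` closure that consumes and returns the builder can be boxed
// into a `BoxedMutator`. A no-op mutator (with hypothetical type parameters) looks like:
//
//     let _noop: BoxedMutator<MinimalEthSpec, MemoryStore<MinimalEthSpec>, MemoryStore<MinimalEthSpec>> =
//         Box::new(|builder| builder);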

pub type AddBlocksResult<E> = (
    HashMap<Slot, SignedBeaconBlockHash>,
    HashMap<Slot, BeaconStateHash>,
    SignedBeaconBlockHash,
    BeaconState<E>,
);
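
// Illustrative destructuring at a hypothetical call site (the two maps are keyed by slot; the
// trailing pair is typically the root and post-state of the last block that was added):
//
//     let (block_hashes, state_hashes, last_block_root, last_state): AddBlocksResult<MinimalEthSpec> = result;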

/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks.
#[derive(Clone, Copy, Debug)]
pub enum BlockStrategy {
    /// Produce blocks upon the canonical head (normal case).
    OnCanonicalHead,
    /// Ignore the canonical head and produce blocks upon the block at the given slot.
    ///
    /// Useful for simulating forks.
    ForkCanonicalChainAt {
        /// The slot of the parent of the first block produced.
        previous_slot: Slot,
        /// The slot of the first block produced (must be higher than `previous_slot`).
        first_slot: Slot,
    },
}
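
// Illustrative construction (slot numbers chosen arbitrarily): fork away from the canonical
// chain by building a block at slot 5 on top of the canonical block from slot 3.
//
//     let _fork = BlockStrategy::ForkCanonicalChainAt {
//         previous_slot: Slot::new(3),
//         first_slot: Slot::new(5),
//     };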

/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations.
#[derive(Clone, Debug)]
pub enum AttestationStrategy {
    /// All validators attest to whichever block the `BeaconChainHarness` has produced.
    AllValidators,
    /// Only the given validators should attest. All others should fail to produce attestations.
    SomeValidators(Vec<usize>),
}
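
// Illustrative: restrict attestations to validators 0, 1 and 2 (indices into the validator
// registry) and leave every other validator silent.
//
//     let _strategy = AttestationStrategy::SomeValidators(vec![0, 1, 2]);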

/// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or
/// `state.next_sync_committee` when creating sync messages or contributions.
#[derive(Clone, Debug)]
pub enum RelativeSyncCommittee {
    Current,
    Next,
}
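
// Illustrative: request sync messages against the sync committee of the *next* period rather
// than the one currently on duty.
//
//     let _committee = RelativeSyncCommittee::Next;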

fn make_rng() -> Mutex<StdRng> {
    // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
    // but fixed value for reproducibility.
    Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64))
}
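
// Illustrative use of the seeded RNG: because the seed is fixed, every test run draws the
// same sequence of values.
//
//     let rng = make_rng();
//     let _byte: u8 = rng.lock().gen();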

/// Return a `ChainSpec` suitable for test usage.
///
/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment
/// variable. Otherwise use the default spec.
pub fn test_spec<E: EthSpec>() -> ChainSpec {
    let mut spec = if cfg!(feature = "fork_from_env") {
        let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| {
            panic!(
                "{} env var must be defined when using fork_from_env: {:?}",
                FORK_NAME_ENV_VAR, e
            )
        });
        let fork = ForkName::from_str(fork_name.as_str()).unwrap();
        fork.make_genesis_spec(E::default_spec())
    } else {
        E::default_spec()
    };

    // Set target aggregators to a high value by default.
    spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS;
    spec
}
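
// Illustrative invocation (assuming the test binary is compiled with the `fork_from_env`
// feature and that "altair" is a fork name accepted by `ForkName::from_str`):
//
//     FORK_NAME=altair cargo test --features fork_from_env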

pub struct Builder<T: BeaconChainTypes> {
    eth_spec_instance: T::EthSpec,
    spec: Option<ChainSpec>,
    validator_keypairs: Option<Vec<Keypair>>,
    chain_config: Option<ChainConfig>,
    store_config: Option<StoreConfig>,
    #[allow(clippy::type_complexity)]
    store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
    initial_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>,
    store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>,
    execution_layer: Option<ExecutionLayer<T::EthSpec>>,
    mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
    mock_builder: Option<TestingBuilder<T::EthSpec>>,
    testing_slot_clock: Option<TestingSlotClock>,
    runtime: TestRuntime,
    log: Logger,
}
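
// A minimal usage sketch (illustrative; only methods defined in this file are used, and the
// keypair count is arbitrary): configure an in-memory harness for `MinimalEthSpec`.
//
//     let _builder = Builder::<EphemeralHarnessType<MinimalEthSpec>>::new(MinimalEthSpec)
//         .default_spec()
//         .deterministic_keypairs(8)
//         .fresh_ephemeral_store();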

impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
    pub fn fresh_ephemeral_store(mut self) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");

        let store = Arc::new(
            HotColdDB::open_ephemeral(
                self.store_config.clone().unwrap_or_default(),
                spec.clone(),
                self.log.clone(),
            )
            .unwrap(),
        );
        let mutator = move |builder: BeaconChainBuilder<_>| {
            let genesis_state = interop_genesis_state::<E>(
                &validator_keypairs,
                HARNESS_GENESIS_TIME,
                Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                None,
                builder.get_spec(),
            )
            .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Create a new ephemeral store that uses the specified `genesis_state`.
    pub fn genesis_state_ephemeral_store(mut self, genesis_state: BeaconState<E>) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");

        let store = Arc::new(
            HotColdDB::open_ephemeral(
                self.store_config.clone().unwrap_or_default(),
                spec.clone(),
                self.log.clone(),
            )
            .unwrap(),
        );
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Manually restore from a given `MemoryStore`.
    pub fn resumed_ephemeral_store(
        mut self,
        store: Arc<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>>,
    ) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}
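
// Illustrative resume flow (the `store` handle here is assumed to come from a previously
// built harness, e.g. by cloning its `Arc`'d database before dropping the old chain):
//
//     let _resumed = Builder::<EphemeralHarnessType<MinimalEthSpec>>::new(MinimalEthSpec)
//         .default_spec()
//         .deterministic_keypairs(8)
//         .resumed_ephemeral_store(store);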

impl<E: EthSpec> Builder<DiskHarnessType<E>> {
    /// Disk store, start from genesis.
    pub fn fresh_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");

        let mutator = move |builder: BeaconChainBuilder<_>| {
            let genesis_state = interop_genesis_state::<E>(
                &validator_keypairs,
                HARNESS_GENESIS_TIME,
                Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                None,
                builder.get_spec(),
            )
            .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Disk store, resume.
    pub fn resumed_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };
        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}

impl<E, Hot, Cold> Builder<BaseHarnessType<E, Hot, Cold>>
where
    E: EthSpec,
    Hot: ItemStore<E>,
    Cold: ItemStore<E>,
{
    pub fn new(eth_spec_instance: E) -> Self {
        let runtime = TestRuntime::default();
        let log = runtime.log.clone();

        Self {
            eth_spec_instance,
            spec: None,
            validator_keypairs: None,
            chain_config: None,
            store_config: None,
            store: None,
            initial_mutator: None,
            store_mutator: None,
            execution_layer: None,
            mock_execution_layer: None,
            mock_builder: None,
            testing_slot_clock: None,
            runtime,
            log,
        }
    }

    pub fn deterministic_keypairs(self, num_keypairs: usize) -> Self {
        self.keypairs(types::test_utils::generate_deterministic_keypairs(
            num_keypairs,
        ))
    }

    pub fn keypairs(mut self, validator_keypairs: Vec<Keypair>) -> Self {
        self.validator_keypairs = Some(validator_keypairs);
        self
    }

    pub fn default_spec(self) -> Self {
        self.spec_or_default(None)
    }

    pub fn spec(self, spec: ChainSpec) -> Self {
        self.spec_or_default(Some(spec))
    }

    pub fn spec_or_default(mut self, spec: Option<ChainSpec>) -> Self {
        self.spec = Some(spec.unwrap_or_else(test_spec::<E>));
        self
    }

    pub fn logger(mut self, log: Logger) -> Self {
        self.log = log.clone();
        self.runtime.set_logger(log);
        self
    }

    /// This mutator will be run before the `store_mutator`.
    pub fn initial_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(
            self.initial_mutator.is_none(),
            "initial mutator already set"
        );
        self.initial_mutator = Some(mutator);
        self
    }

    /// This mutator will be run after the `initial_mutator`.
    pub fn store_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(self.store_mutator.is_none(), "store mutator already set");
        self.store_mutator = Some(mutator);
        self
    }
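
    // Illustrative sketch (not part of the original file): the two mutators run in
    // order during `build()`, `initial_mutator` first and then `store_mutator`. A
    // hypothetical caller could wire them up like this, assuming `harness_builder`
    // is a `Builder` created earlier in this file:
    //
    //     let harness_builder = harness_builder
    //         .initial_mutator(Box::new(|builder| builder))
    //         .store_mutator(Box::new(|builder| builder));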
    /// Purposefully replace the `store_mutator`.
    pub fn override_store_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(self.store_mutator.is_some(), "store mutator not set");
        self.store_mutator = Some(mutator);
        self
    }

    pub fn chain_config(mut self, chain_config: ChainConfig) -> Self {
        self.chain_config = Some(chain_config);
        self
    }

    pub fn execution_layer(mut self, urls: &[&str]) -> Self {
        assert!(
            self.execution_layer.is_none(),
            "execution layer already defined"
        );

        let urls: Vec<SensitiveUrl> = urls
            .iter()
            .map(|s| SensitiveUrl::parse(s))
            .collect::<Result<_, _>>()
            .unwrap();

        let spec = MainnetEthSpec::default_spec();
        let config = execution_layer::Config {
            execution_endpoints: urls,
            secret_files: vec![],
            suggested_fee_recipient: Some(Address::repeat_byte(42)),
            ..Default::default()
        };
        let execution_layer = ExecutionLayer::from_config(
            config,
            self.runtime.task_executor.clone(),
            self.log.clone(),
            &spec,
        )
        .unwrap();

        self.execution_layer = Some(execution_layer);
        self
    }
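
    // Illustrative sketch (not part of the original file): pointing the builder at a
    // real engine API endpoint. The URL is hypothetical and only useful if an
    // execution client is actually listening there:
    //
    //     let harness_builder = harness_builder.execution_layer(&["http://localhost:8551"]);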

    pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
        let mock = self
            .mock_execution_layer
            .as_mut()
            .expect("must have mock execution layer to recalculate fork times");
        let spec = self
            .spec
            .clone()
            .expect("cannot recalculate fork times without spec");
        mock.server.execution_block_generator().shanghai_time =
            spec.capella_fork_epoch.map(|epoch| {
                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
            });
        mock.server.execution_block_generator().eip4844_time =
            spec.eip4844_fork_epoch.map(|epoch| {
                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
            });

        self
    }
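
    // Illustrative sketch (not part of the original file): this only makes sense once a
    // mock execution layer has been attached, e.g. (with a hypothetical `genesis_time`):
    //
    //     let harness_builder = harness_builder
    //         .mock_execution_layer()
    //         .recalculate_fork_times_with_genesis(genesis_time);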

    pub fn mock_execution_layer(mut self) -> Self {
        let spec = self.spec.clone().expect("cannot build without spec");
        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        let mock = MockExecutionLayer::new(
            self.runtime.task_executor.clone(),
            DEFAULT_TERMINAL_BLOCK,
            shanghai_time,
            eip4844_time,
            Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
            spec,
            None,
        );
        self.execution_layer = Some(mock.el.clone());
        self.mock_execution_layer = Some(mock);
        self
    }
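
    // Illustrative sketch (not part of the original file): tests that need an execution
    // layer without running a real engine typically insert this call into the builder
    // chain, assuming `harness_builder` exists:
    //
    //     let harness_builder = harness_builder.mock_execution_layer();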

    pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self {
        // Get a random unused port
        let port = unused_port::unused_tcp_port().unwrap();
        let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();

        let spec = self.spec.clone().expect("cannot build without spec");
        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
        });
        let mock_el = MockExecutionLayer::new(
            self.runtime.task_executor.clone(),
            DEFAULT_TERMINAL_BLOCK,
            shanghai_time,
            eip4844_time,
            Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
            spec.clone(),
            Some(builder_url.clone()),
        )
        .move_to_terminal_block();

        let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap();

        self.mock_builder = Some(TestingBuilder::new(
            mock_el_url,
            builder_url,
            beacon_url,
            spec,
            self.runtime.task_executor.clone(),
        ));
        self.execution_layer = Some(mock_el.el.clone());
        self.mock_execution_layer = Some(mock_el);

        self
    }
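
    // Illustrative sketch (not part of the original file): the builder flow also needs a
    // beacon API URL for the `TestingBuilder`; the address below is hypothetical:
    //
    //     let beacon_url = SensitiveUrl::parse("http://127.0.0.1:5052").unwrap();
    //     let harness_builder = harness_builder.mock_execution_layer_with_builder(beacon_url);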

    /// Instruct the mock execution engine to always return a "valid" response to any payload it is
    /// asked to execute.
    pub fn mock_execution_layer_all_payloads_valid(self) -> Self {
        self.mock_execution_layer
            .as_ref()
            .expect("requires mock execution layer")
            .server
            .all_payloads_valid();
        self
    }

    pub fn testing_slot_clock(mut self, slot_clock: TestingSlotClock) -> Self {
        self.testing_slot_clock = Some(slot_clock);
        self
    }
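
    // Illustrative sketch (not part of the original file): supplying an explicit clock,
    // assuming `TestingSlotClock::new(genesis_slot, genesis_delay, slot_duration)` from
    // the `slot_clock` crate:
    //
    //     let clock = TestingSlotClock::new(
    //         Slot::new(0),
    //         Duration::from_secs(0),
    //         Duration::from_secs(12),
    //     );
    //     let harness_builder = harness_builder.testing_slot_clock(clock);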

    pub fn build(self) -> BeaconChainHarness<BaseHarnessType<E, Hot, Cold>> {
        let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
        let log = self.log;
        let spec = self.spec.expect("cannot build without spec");
        let seconds_per_slot = spec.seconds_per_slot;
        let validator_keypairs = self
            .validator_keypairs
            .expect("cannot build without validator keypairs");

        let mut builder = BeaconChainBuilder::new(self.eth_spec_instance)
            .logger(log.clone())
            .custom_spec(spec)
            .store(self.store.expect("cannot build without store"))
            .store_migrator_config(MigratorConfig::default().blocking())
            .task_executor(self.runtime.task_executor.clone())
            .execution_layer(self.execution_layer)
            .dummy_eth1_backend()
            .expect("should build dummy backend")
            .shutdown_sender(shutdown_tx)
            .chain_config(self.chain_config.unwrap_or_default())
            .event_handler(Some(ServerSentEventHandler::new_with_capacity(
                log.clone(),
                5,
            )))
            .monitor_validators(true, vec![], log);

        builder = if let Some(mutator) = self.initial_mutator {
            mutator(builder)
        } else {
            builder
        };

        builder = if let Some(mutator) = self.store_mutator {
            mutator(builder)
        } else {
            builder
        };

        // Initialize the slot clock only if it hasn't already been initialized.
        builder = if let Some(testing_slot_clock) = self.testing_slot_clock {
            builder.slot_clock(testing_slot_clock)
        } else if builder.get_slot_clock().is_none() {
            builder
                .testing_slot_clock(Duration::from_secs(seconds_per_slot))
                .expect("should configure testing slot clock")
        } else {
            builder
        };

        let chain = builder.build().expect("should build");

        BeaconChainHarness {
            spec: chain.spec.clone(),
            chain: Arc::new(chain),
            validator_keypairs,
            shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)),
            runtime: self.runtime,
            mock_execution_layer: self.mock_execution_layer,
            mock_builder: self.mock_builder.map(Arc::new),
            rng: make_rng(),
        }
    }
}
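
// Illustrative sketch (not part of the original file): a typical end-to-end use of the
// builder above. `default_spec`, `deterministic_keypairs` and `fresh_ephemeral_store`
// are assumed to be companion setters defined elsewhere in this file.
//
//     let harness = BeaconChainHarness::builder(MainnetEthSpec)
//         .default_spec()
//         .deterministic_keypairs(8)
//         .fresh_ephemeral_store()
//         .mock_execution_layer()
//         .build();
//     assert_eq!(harness.validator_keypairs.len(), 8);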

/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
/// Used for testing.
pub struct BeaconChainHarness<T: BeaconChainTypes> {
    pub validator_keypairs: Vec<Keypair>,

    pub chain: Arc<BeaconChain<T>>,
    pub spec: ChainSpec,
    pub shutdown_receiver: Arc<Mutex<Receiver<ShutdownReason>>>,
    pub runtime: TestRuntime,

    pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
    pub mock_builder: Option<Arc<TestingBuilder<T::EthSpec>>>,

    pub rng: Mutex<StdRng>,
}

pub type CommitteeAttestations<E> = Vec<(Attestation<E>, SubnetId)>;
pub type HarnessAttestations<E> =
    Vec<(CommitteeAttestations<E>, Option<SignedAggregateAndProof<E>>)>;

pub type HarnessSyncContributions<E> = Vec<(
    Vec<(SyncCommitteeMessage, usize)>,
    Option<SignedContributionAndProof<E>>,
)>;

impl<E, Hot, Cold> BeaconChainHarness<BaseHarnessType<E, Hot, Cold>>
where
    E: EthSpec,
    Hot: ItemStore<E>,
    Cold: ItemStore<E>,
{
    pub fn builder(eth_spec_instance: E) -> Builder<BaseHarnessType<E, Hot, Cold>> {
        Builder::new(eth_spec_instance)
    }

    pub fn logger(&self) -> &slog::Logger {
        &self.chain.log
    }

    pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator<E>> {
        self.mock_execution_layer
            .as_ref()
            .expect("harness was not built with mock execution layer")
            .server
            .execution_block_generator()
    }

    pub fn get_all_validators(&self) -> Vec<usize> {
        (0..self.validator_keypairs.len()).collect()
    }

    pub fn slots_per_epoch(&self) -> u64 {
        E::slots_per_epoch()
    }

    pub fn epoch_start_slot(&self, epoch: u64) -> u64 {
        let epoch = Epoch::new(epoch);
        epoch.start_slot(E::slots_per_epoch()).into()
    }
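
    // For example, with the mainnet preset's 32 slots per epoch, `epoch_start_slot(2)`
    // returns slot 64.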

    pub fn shutdown_reasons(&self) -> Vec<ShutdownReason> {
        let mutex = self.shutdown_receiver.clone();
        let mut receiver = mutex.lock();
        std::iter::from_fn(move || match receiver.try_next() {
            Ok(Some(s)) => Some(s),
            Ok(None) => panic!("shutdown sender dropped"),
            Err(_) => None,
        })
        .collect()
    }
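
    // Illustrative sketch (not part of the original file): a test can assert that the
    // chain has not asked to shut down, e.g.:
    //
    //     assert!(harness.shutdown_reasons().is_empty());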

    pub fn get_current_state(&self) -> BeaconState<E> {
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
4. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values don't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy, I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue; if anything, I think it'll be better, since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
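Below is a minimal sketch of the `Arc`-snapshot pattern described above. The types are simplified stand-ins (not the real `CanonicalHead`, `BeaconSnapshot` or `BeaconForkChoice`); the point is only that `cached_head()` clones the `Arc` and releases the read lock immediately, so callers never hold the `cached_head` lock while taking other locks such as `fork_choice`.
```rust
use parking_lot::RwLock;
use std::sync::Arc;

// Illustrative stand-in for the cached head snapshot.
struct Snapshot {
    head_slot: u64,
}

// Illustrative stand-in for the grouped `canonical_head` struct.
struct CanonicalHead {
    cached_head: RwLock<Arc<Snapshot>>,
}

impl CanonicalHead {
    /// Clone the `Arc` and drop the read guard straight away; the caller
    /// works with the snapshot without holding any `CanonicalHead` lock.
    fn cached_head(&self) -> Arc<Snapshot> {
        Arc::clone(&self.cached_head.read())
    }
}

fn head_slot(canonical_head: &CanonicalHead) -> u64 {
    // No lock is held once the cloned `Arc` has been returned.
    canonical_head.cached_head().head_slot
}
```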
v0.12 fork choice update (#1229)
* Incomplete scraps
* Add progress on new fork choice impl
* Further progress
* First complete compiling version
* Remove chain reference
* Add new lmd_ghost crate
* Start integrating into beacon chain
* Update `milagro_bls` to new release (#1183)
* Update milagro_bls to new release
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Tidy up fake cryptos
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* move SecretHash to bls and put plaintext back
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Rough beacon chain impl working
* Remove fork_choice_2
* Remove checkpoint manager
* Half finished ssz impl
* Add missed file
* Add persistence
* Tidy, fix some compile errors
* Remove RwLock from ProtoArrayForkChoice
* Fix store-based compile errors
* Add comments, tidy
* Move function out of ForkChoice struct
* Start testing
* More testing
* Fix compile error
* Tidy beacon_chain::fork_choice
* Queue attestations from the current slot
* Allow fork choice to handle prior-to-genesis start
* Improve error granularity
* Test attestation dequeuing
* Process attestations during block
* Store target root in fork choice
* Move fork choice verification into new crate
* Update tests
* Consensus updates for v0.12 (#1228)
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Fix typo found in review
* Add `Block` struct to ProtoArray
* Start fixing get_ancestor
* Add rough progress on testing
* Get fork choice tests working
* Progress with testing
* Fix partialeq impl
* Move slot clock from fc_store
* Improve testing
* Add testing for best justified
* Add clone back to SystemTimeSlotClock
* Add balances test
* Start adding balances cache again
* Wire-in balances cache
* Improve tests
* Remove commented-out tests
* Remove beacon_chain::ForkChoice
* Rename crates
* Update wider codebase to new fork_choice layout
* Move advance_slot in test harness
* Tidy ForkChoice::update_time
* Fix verification tests
* Fix compile error with iter::once
* Fix fork choice tests
* Ensure block attestations are processed
* Fix failing beacon_chain tests
* Add first invalid block check
* Add finalized block check
* Progress with testing, new store builder
* Add fixes to get_ancestor
* Fix old genesis justification test
* Fix remaining fork choice tests
* Change root iteration method
* Move on_verified_block
* Remove unused method
* Start adding attestation verification tests
* Add invalid ffg target test
* Add target epoch test
* Add queued attestation test
* Remove old fork choice verification tests
* Tidy, add test
* Move fork choice lock drop
* Rename BeaconForkChoiceStore
* Add comments, tidy BeaconForkChoiceStore
* Update metrics, rename fork_choice_store.rs
* Remove genesis_block_root from ForkChoice
* Tidy
* Update fork_choice comments
* Tidy, add comments
* Tidy, simplify ForkChoice, fix compile issue
* Tidy, removed dead file
* Increase http request timeout
* Fix failing rest_api test
* Set HTTP timeout back to 5s
* Apply fix to get_ancestor
* Address Michael's comments
* Fix typo
* Revert "Fix broken attestation verification test"
This reverts commit 722cdc903b12611de27916a57eeecfa3224f2279.
Co-authored-by: Kirk Baird <baird.k@outlook.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
2020-06-17 01:10:22 +00:00

    pub fn get_timestamp_at_slot(&self) -> u64 {
        let state = self.get_current_state();
        compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap()
    }

    pub fn get_current_state_and_root(&self) -> (BeaconState<E>, Hash256) {
        let head = self.chain.head_snapshot();
        let state_root = head.beacon_state_root();
        (
            head.beacon_state.clone_with_only_committee_caches(),
            state_root,
        )
    }

    pub fn head_slot(&self) -> Slot {
        self.chain.canonical_head.cached_head().head_slot()
    }

    pub fn head_block_root(&self) -> Hash256 {
        self.chain.canonical_head.cached_head().head_block_root()
    }

    pub fn finalized_checkpoint(&self) -> Checkpoint {
        self.chain
            .canonical_head
            .cached_head()
            .finalized_checkpoint()
    }

    pub fn justified_checkpoint(&self) -> Checkpoint {
        self.chain
            .canonical_head
            .cached_head()
            .justified_checkpoint()
    }

    pub fn get_current_slot(&self) -> Slot {
        self.chain.slot().unwrap()
    }

Separate execution payloads in the DB (#3157)
## Proposed Changes
Reduce post-merge disk usage by not storing finalized execution payloads in Lighthouse's database.
:warning: **This is achieved in a backwards-incompatible way for networks that have already merged** :warning:. Kiln users and shadow fork enjoyers will be unable to downgrade after running the code from this PR. The upgrade migration may take several minutes to run, and can't be aborted after it begins.
The main changes are:
- New column in the database called `ExecPayload`, keyed by beacon block root.
- The `BeaconBlock` column now stores blinded blocks only.
- Lots of places that previously used full blocks now use blinded blocks, e.g. analytics APIs, block replay in the DB, etc.
- On finalization:
- `prune_abandoned_forks` deletes non-canonical payloads whilst deleting non-canonical blocks.
- `migrate_db` deletes finalized canonical payloads whilst deleting finalized states.
- Conversions between blinded and full blocks are implemented in a compositional way, duplicating some work from Sean's PR #3134.
- The execution layer has a new `get_payload_by_block_hash` method that reconstructs a payload using the EE's `eth_getBlockByHash` call.
- I've tested manually that it works on Kiln, using Geth and Nethermind.
- This isn't necessarily the most efficient method, and new engine APIs are being discussed to improve this: https://github.com/ethereum/execution-apis/pull/146.
- We're depending on the `ethers` master branch, due to lots of recent changes. We're also using a workaround for https://github.com/gakonst/ethers-rs/issues/1134.
- Payload reconstruction is used in the HTTP API via `BeaconChain::get_block`, which is now `async`. Due to the `async` fn, the `blocking_json` wrapper has been removed.
- Payload reconstruction is used in network RPC to serve blocks-by-{root,range} responses. Here the `async` adjustment is messier, although I think I've managed to come up with a reasonable compromise: the handlers take the `SendOnDrop` by value so that they can drop it on _task completion_ (after the `fn` returns). Still, this is introducing disk reads onto core executor threads, which may have a negative performance impact (thoughts appreciated).
## Additional Info
- [x] For performance it would be great to remove the cloning of full blocks when converting them to blinded blocks to write to disk. I'm going to experiment with a `put_block` API that takes the block by value, breaks it into a blinded block and a payload, stores the blinded block, and then re-assembles the full block for the caller.
- [x] We should measure the latency of blocks-by-root and blocks-by-range responses.
- [x] We should add integration tests that stress the payload reconstruction (basic tests done, issue for more extensive tests: https://github.com/sigp/lighthouse/issues/3159)
- [x] We should (manually) test the schema v9 migration from several prior versions, particularly as blocks have changed on disk and some migrations rely on being able to load blocks.
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2022-05-12 00:42:17 +00:00
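As an illustration of the storage split described above (and only that: the type and column names below are invented stand-ins, not the actual Lighthouse schema), a block is split into a blinded block plus its execution payload on write and re-assembled on read; a pruned finalized payload would instead be rebuilt from the EE via `eth_getBlockByHash`, which is not modelled in this sketch.
```rust
use std::collections::HashMap;

type Hash256 = [u8; 32];

// Invented stand-ins for the real SSZ types.
#[derive(Clone)]
struct BlindedBlock {
    payload_header: Vec<u8>,
}
#[derive(Clone)]
struct ExecutionPayload {
    transactions: Vec<Vec<u8>>,
}
#[derive(Clone)]
struct FullBlock {
    blinded: BlindedBlock,
    payload: ExecutionPayload,
}

#[derive(Default)]
struct Store {
    // "BeaconBlock" column: blinded blocks only, keyed by block root.
    blinded_blocks: HashMap<Hash256, BlindedBlock>,
    // New "ExecPayload" column, also keyed by block root.
    payloads: HashMap<Hash256, ExecutionPayload>,
}

impl Store {
    // Split on write so the payload can later be pruned independently.
    fn put_block(&mut self, root: Hash256, block: FullBlock) {
        self.blinded_blocks.insert(root, block.blinded);
        self.payloads.insert(root, block.payload);
    }

    // Re-assemble on read. If the payload has been pruned after finalization,
    // the real implementation reconstructs it from the EE instead.
    fn get_full_block(&self, root: &Hash256) -> Option<FullBlock> {
        Some(FullBlock {
            blinded: self.blinded_blocks.get(root)?.clone(),
            payload: self.payloads.get(root)?.clone(),
        })
    }
}
```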
    pub fn get_block(
        &self,
        block_hash: SignedBeaconBlockHash,
    ) -> Option<SignedBeaconBlock<E, BlindedPayload<E>>> {
        self.chain.get_blinded_block(&block_hash.into()).unwrap()
    }

    pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool {
        self.get_block(block_hash).is_some()
    }

    pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option<BeaconState<E>> {
        self.chain
            .store
            .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate)
            .unwrap()
    }

    pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option<BeaconState<E>> {
        self.chain
            .store
            .load_cold_state(&state_hash.into())
            .unwrap()
    }

    pub fn hot_state_exists(&self, state_hash: BeaconStateHash) -> bool {
        self.get_hot_state(state_hash).is_some()
    }

    pub fn cold_state_exists(&self, state_hash: BeaconStateHash) -> bool {
        self.get_cold_state(state_hash).is_some()
    }

    /// A slot is "skipped" if no block was applied at it, i.e. its block root
    /// equals the block root of the previous slot.
    pub fn is_skipped_slot(&self, state: &BeaconState<E>, slot: Slot) -> bool {
        state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
    }

Run fork choice before block proposal (#3168)
## Issue Addressed
Upcoming spec change https://github.com/ethereum/consensus-specs/pull/2878
## Proposed Changes
1. Run fork choice at the start of every slot, and wait for this run to complete before proposing a block.
2. As an optimisation, also run fork choice 3/4 of the way through the slot (at 9s), _dequeueing attestations for the next slot_.
3. Remove the fork choice run from the state advance timer that occurred before advancing the state.
## Additional Info
### Block Proposal Accuracy
This change makes us more likely to propose on top of the correct head in the presence of re-orgs with proposer boost in play. The main scenario that this change is designed to address is described in the linked spec issue.
### Attestation Accuracy
This change _also_ makes us more likely to attest to the correct head. Currently, in the case of a skipped slot at `slot`, we only run fork choice 9s into `slot - 1`. This means the attestations from `slot - 1` aren't taken into consideration, and any boost applied to the block from `slot - 1` is not removed (it should be). In the language of the linked spec issue, this means we are liable to attest to C, even when the majority voting weight has already caused a re-org to B.
### Why remove the call before the state advance?
If we've run fork choice at the start of the slot then it has already dequeued all the attestations from the previous slot, which are the only ones eligible to influence the head in the current slot. Running fork choice again is unnecessary (unless we run it for the next slot and try to pre-empt a re-org, but I don't currently think this is a great idea).
### Performance
Based on Prater testing this adds about 5-25ms of runtime to block proposal times, which are 500-1000ms on average (and spike to 5s+ sometimes due to state handling issues :cry: ). I believe this is a small enough penalty to enable it by default, with the option to disable it via the new flag `--fork-choice-before-proposal-timeout 0`. Upcoming work on block packing and state representation will also reduce block production times in general, while removing the spikes.
### Implementation
Fork choice gets invoked at the start of the slot via the `per_slot_task` function called from the slot timer. It then uses a condition variable to signal to block production that fork choice has been updated. This is a bit funky, but it seems to work. One downside of the timer-based approach is that it doesn't happen automatically in most of the tests. The test added by this PR has to trigger the run manually.
2022-05-20 05:02:11 +00:00
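A minimal sketch of the condition-variable handshake described in the implementation note above, using invented names and `std` primitives rather than the actual Lighthouse types: the slot-timer task records that fork choice has run for a slot, and block production waits until that has happened (the real code also applies the `--fork-choice-before-proposal-timeout` limit, which is not modelled here).
```rust
use std::sync::{Condvar, Mutex};

// Invented type; the real signalling lives inside the BeaconChain.
#[derive(Default)]
struct ForkChoiceSignal {
    latest_run: Mutex<Option<u64>>,
    condvar: Condvar,
}

impl ForkChoiceSignal {
    /// Called by the per-slot timer once fork choice has completed for `slot`.
    fn notify_fork_choice_complete(&self, slot: u64) {
        *self.latest_run.lock().unwrap() = Some(slot);
        self.condvar.notify_all();
    }

    /// Called by block production: block until fork choice has run for `slot`.
    fn wait_for_fork_choice(&self, slot: u64) {
        let mut latest = self.latest_run.lock().unwrap();
        while latest.map_or(true, |s| s < slot) {
            latest = self.condvar.wait(latest).unwrap();
        }
    }
}
```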

    pub async fn make_block(
        &self,
        mut state: BeaconState<E>,
        slot: Slot,
    ) -> (SignedBeaconBlock<E>, BeaconState<E>) {
        assert_ne!(slot, 0, "can't produce a block at slot 0");
        assert!(slot >= state.slot());

        complete_state_advance(&mut state, None, slot, &self.spec)
            .expect("should be able to advance state to slot");

        state
            .build_all_caches(&self.spec)
            .expect("should build caches");

        let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();

        // If we produce two blocks for the same slot, they hash up to the same value and
        // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
        // different blocks each time.
        let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>());

        let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);

        let (block, state) = self
            .chain
            .produce_block_on_state(
                state,
                None,
                slot,
                randao_reveal,
                Some(graffiti),
                ProduceBlockVerification::VerifyRandao,
            )
            .await
            .unwrap();

        let signed_block = block.sign(
            &self.validator_keypairs[proposer_index].sk,
            &state.fork(),
            state.genesis_validators_root(),
            &self.spec,
        );

        (signed_block, state)
    }
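For context, a hedged example of how a harness method like this might be driven from a test now that block production is `async`; the `get_harness()` constructor here is a placeholder for whatever setup the surrounding test file uses.
```rust
// Hypothetical test; `get_harness()` stands in for the usual harness setup.
#[tokio::test]
async fn produces_a_block_at_the_next_slot() {
    let harness = get_harness();
    let state = harness.get_current_state();
    let slot = harness.get_current_slot() + 1;

    // Block production is async after #3244, hence the `.await`.
    let (signed_block, _post_state) = harness.make_block(state, slot).await;
    assert_eq!(signed_block.slot(), slot);
}
```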

    /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after
    /// caches are built but before the generated block is processed.
    pub async fn make_block_return_pre_state(
        &self,
        mut state: BeaconState<E>,
        slot: Slot,
    ) -> (SignedBeaconBlock<E>, BeaconState<E>) {
        assert_ne!(slot, 0, "can't produce a block at slot 0");
        assert!(slot >= state.slot());

        complete_state_advance(&mut state, None, slot, &self.spec)
            .expect("should be able to advance state to slot");

        state
            .build_all_caches(&self.spec)
            .expect("should build caches");

        let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();

        // If we produce two blocks for the same slot, they hash up to the same value and
        // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
        // different blocks each time.
        let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>());

        let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);

        let pre_state = state.clone();

        let (block, state) = self
            .chain
            .produce_block_on_state(
                state,
                None,
                slot,
                randao_reveal,
                Some(graffiti),
                ProduceBlockVerification::VerifyRandao,
            )
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
4. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values don't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy, I've just used the `slot` that was saved in the `fork_choice_store`. That seems like a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue; if anything, I think it'll be better, since the genesis alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test; it's not as efficient, but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
.await
|
2021-07-09 06:15:32 +00:00
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
let signed_block = block.sign(
|
|
|
|
&self.validator_keypairs[proposer_index].sk,
|
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
(signed_block, pre_state)
|
|
|
|
}
|
|
|
|
|
Run fork choice before block proposal (#3168)
## Issue Addressed
Upcoming spec change https://github.com/ethereum/consensus-specs/pull/2878
## Proposed Changes
1. Run fork choice at the start of every slot, and wait for this run to complete before proposing a block.
2. As an optimisation, also run fork choice 3/4 of the way through the slot (at 9s), _dequeueing attestations for the next slot_.
3. Remove the fork choice run from the state advance timer that occurred before advancing the state.
## Additional Info
### Block Proposal Accuracy
This change makes us more likely to propose on top of the correct head in the presence of re-orgs with proposer boost in play. The main scenario that this change is designed to address is described in the linked spec issue.
### Attestation Accuracy
This change _also_ makes us more likely to attest to the correct head. Currently, in the case of a skipped slot at `slot`, we only run fork choice 9s into `slot - 1`. This means the attestations from `slot - 1` aren't taken into consideration, and any boost applied to the block from `slot - 1` is not removed (it should be). In the language of the linked spec issue, this means we are liable to attest to C, even when the majority voting weight has already caused a re-org to B.
### Why remove the call before the state advance?
If we've run fork choice at the start of the slot then it has already dequeued all the attestations from the previous slot, which are the only ones eligible to influence the head in the current slot. Running fork choice again is unnecessary (unless we run it for the next slot and try to pre-empt a re-org, but I don't currently think this is a great idea).
### Performance
Based on Prater testing this adds about 5-25ms of runtime to block proposal times, which are 500-1000ms on average (and spike to 5s+ sometimes due to state handling issues :cry: ). I believe this is a small enough penalty to enable it by default, with the option to disable it via the new flag `--fork-choice-before-proposal-timeout 0`. Upcoming work on block packing and state representation will also reduce block production times in general, while removing the spikes.
### Implementation
Fork choice gets invoked at the start of the slot via the `per_slot_task` function called from the slot timer. It then uses a condition variable to signal to block production that fork choice has been updated. This is a bit funky, but it seems to work. One downside of the timer-based approach is that it doesn't happen automatically in most of the tests. The test added by this PR has to trigger the run manually.
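As a rough sketch of that signalling pattern (plain `std::sync` types, with `u64` standing in for `Slot`; this is illustrative only, not the actual Lighthouse implementation): the slot timer records the slot it ran fork choice for and notifies a condition variable, while block production waits on it with a timeout.
```
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

/// Latest slot for which fork choice has completed, plus a condvar to wake waiters.
struct ForkChoiceSignal {
    latest: Mutex<Option<u64>>,
    condvar: Condvar,
}

impl ForkChoiceSignal {
    /// Called by the slot timer once its fork choice run for `slot` has finished.
    fn notify_fork_choice_complete(&self, slot: u64) {
        *self.latest.lock().unwrap() = Some(slot);
        self.condvar.notify_all();
    }

    /// Called by block production: wait (bounded by `timeout`) until fork choice
    /// has run for `slot`, returning `false` if the timeout expired first.
    fn wait_for_fork_choice(&self, slot: u64, timeout: Duration) -> bool {
        let latest = self.latest.lock().unwrap();
        let (latest, timed_out) = self
            .condvar
            .wait_timeout_while(latest, timeout, |l| l.map_or(true, |s| s < slot))
            .unwrap();
        !timed_out.timed_out() && latest.map_or(false, |s| s >= slot)
    }
}

fn main() {
    let signal = Arc::new(ForkChoiceSignal {
        latest: Mutex::new(None),
        condvar: Condvar::new(),
    });

    // "Slot timer": pretend to run fork choice for slot 10, then signal.
    let timer_signal = Arc::clone(&signal);
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50)); // stand-in for the fork choice run
        timer_signal.notify_fork_choice_complete(10);
    });

    // "Block production": wait for fork choice before proposing at slot 10.
    let ready = signal.wait_for_fork_choice(10, Duration::from_secs(1));
    println!("fork choice ready before proposal: {}", ready);
}
```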
2022-05-20 05:02:11 +00:00
|
|
|
/// Create a randao reveal for a block at `slot`.
|
|
|
|
pub fn sign_randao_reveal(
|
|
|
|
&self,
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
proposer_index: usize,
|
|
|
|
slot: Slot,
|
|
|
|
) -> Signature {
|
|
|
|
let epoch = slot.epoch(E::slots_per_epoch());
|
|
|
|
let domain = self.spec.get_domain(
|
|
|
|
epoch,
|
|
|
|
Domain::Randao,
|
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
);
|
|
|
|
let message = epoch.signing_root(domain);
|
|
|
|
let sk = &self.validator_keypairs[proposer_index].sk;
|
|
|
|
sk.sign(message)
|
|
|
|
}
|
|
|
|
|
2022-12-13 09:57:26 +00:00
|
|
|
/// Sign a beacon block using the proposer's key.
|
|
|
|
pub fn sign_beacon_block(
|
|
|
|
&self,
|
|
|
|
block: BeaconBlock<E>,
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
) -> SignedBeaconBlock<E> {
|
|
|
|
let proposer_index = block.proposer_index() as usize;
|
|
|
|
block.sign(
|
|
|
|
&self.validator_keypairs[proposer_index].sk,
|
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2022-04-13 03:54:42 +00:00
|
|
|
/// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to
|
|
|
|
/// `beacon_block_root`. The provided `state` should match the `block.state_root` for the
|
|
|
|
/// `block` identified by `beacon_block_root`.
|
|
|
|
///
|
|
|
|
/// The attestation doesn't _really_ have anything about it that makes it unaggregated per se;
|
|
|
|
/// however, this function is only required in the context of forming an unaggregated
|
|
|
|
/// attestation. It would be an (undetectable) violation of the protocol to create a
|
|
|
|
/// `SignedAggregateAndProof` based upon the output of this function.
|
|
|
|
///
|
|
|
|
/// This function will produce attestations to optimistic blocks, which is against the
|
|
|
|
/// specification but useful during testing.
|
|
|
|
pub fn produce_unaggregated_attestation_for_block(
|
|
|
|
&self,
|
|
|
|
slot: Slot,
|
|
|
|
index: CommitteeIndex,
|
|
|
|
beacon_block_root: Hash256,
|
|
|
|
mut state: Cow<BeaconState<E>>,
|
|
|
|
state_root: Hash256,
|
|
|
|
) -> Result<Attestation<E>, BeaconChainError> {
|
|
|
|
let epoch = slot.epoch(E::slots_per_epoch());
|
|
|
|
|
|
|
|
if state.slot() > slot {
|
|
|
|
return Err(BeaconChainError::CannotAttestToFutureState);
|
|
|
|
} else if state.current_epoch() < epoch {
|
|
|
|
let mut_state = state.to_mut();
|
|
|
|
// Only perform a "partial" state advance since we do not require the state roots to be
|
|
|
|
// accurate.
|
|
|
|
partial_state_advance(
|
|
|
|
mut_state,
|
|
|
|
Some(state_root),
|
|
|
|
epoch.start_slot(E::slots_per_epoch()),
|
|
|
|
&self.spec,
|
|
|
|
)?;
|
|
|
|
mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
let committee_len = state.get_beacon_committee(slot, index)?.committee.len();
|
|
|
|
|
|
|
|
let target_slot = epoch.start_slot(E::slots_per_epoch());
|
|
|
|
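// If the state hasn't advanced past the target (epoch start) slot, the attested-to block is
// also the target; otherwise read the target root from the state's block roots.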
let target_root = if state.slot() <= target_slot {
|
|
|
|
beacon_block_root
|
|
|
|
} else {
|
|
|
|
*state.get_block_root(target_slot)?
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(Attestation {
|
|
|
|
aggregation_bits: BitList::with_capacity(committee_len)?,
|
|
|
|
data: AttestationData {
|
|
|
|
slot,
|
|
|
|
index,
|
|
|
|
beacon_block_root,
|
|
|
|
source: state.current_justified_checkpoint(),
|
|
|
|
target: Checkpoint {
|
|
|
|
epoch,
|
|
|
|
root: target_root,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
signature: AggregateSignature::empty(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-05-06 11:42:56 +00:00
|
|
|
/// A list of attestations for each committee for the given slot.
|
2019-06-21 01:55:37 +00:00
|
|
|
///
|
2020-05-06 11:42:56 +00:00
|
|
|
/// The first layer of the Vec is organised per committee. For example, if the return value is
|
|
|
|
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
|
|
|
|
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
|
2020-08-26 09:24:55 +00:00
|
|
|
pub fn make_unaggregated_attestations(
|
2019-08-14 00:55:24 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
attesting_validators: &[usize],
|
2019-08-14 00:55:24 +00:00
|
|
|
state: &BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
head_block_root: SignedBeaconBlockHash,
|
2020-05-06 11:42:56 +00:00
|
|
|
attestation_slot: Slot,
|
2022-12-13 09:57:26 +00:00
|
|
|
) -> Vec<CommitteeAttestations<E>> {
|
|
|
|
self.make_unaggregated_attestations_with_limit(
|
|
|
|
attesting_validators,
|
|
|
|
state,
|
|
|
|
state_root,
|
|
|
|
head_block_root,
|
|
|
|
attestation_slot,
|
|
|
|
None,
|
|
|
|
)
|
|
|
|
.0
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn make_unaggregated_attestations_with_limit(
|
|
|
|
&self,
|
|
|
|
attesting_validators: &[usize],
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
state_root: Hash256,
|
|
|
|
head_block_root: SignedBeaconBlockHash,
|
|
|
|
attestation_slot: Slot,
|
|
|
|
limit: Option<usize>,
|
|
|
|
) -> (Vec<CommitteeAttestations<E>>, Vec<usize>) {
|
2021-07-09 06:15:32 +00:00
|
|
|
let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap();
|
2021-08-30 06:41:31 +00:00
|
|
|
let fork = self
|
|
|
|
.spec
|
|
|
|
.fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch()));
|
2020-06-18 09:11:03 +00:00
|
|
|
|
2022-12-13 09:57:26 +00:00
|
|
|
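// Attesting validator indices are collected behind a `Mutex` because each committee below is
// processed in parallel with rayon's `par_iter`.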
let attesters = Mutex::new(vec![]);
|
|
|
|
|
|
|
|
let attestations = state
|
2020-09-29 03:46:54 +00:00
|
|
|
.get_beacon_committees_at_slot(attestation_slot)
|
|
|
|
.expect("should get committees")
|
2019-06-16 19:55:59 +00:00
|
|
|
.iter()
|
2020-05-06 11:42:56 +00:00
|
|
|
.map(|bc| {
|
|
|
|
bc.committee
|
2019-08-29 01:34:25 +00:00
|
|
|
.par_iter()
|
|
|
|
.enumerate()
|
|
|
|
.filter_map(|(i, validator_index)| {
|
2020-05-06 11:42:56 +00:00
|
|
|
if !attesting_validators.contains(validator_index) {
|
|
|
|
return None;
|
2019-08-29 01:34:25 +00:00
|
|
|
}
|
2022-12-13 09:57:26 +00:00
|
|
|
|
|
|
|
let mut attesters = attesters.lock();
|
|
|
|
if let Some(limit) = limit {
|
|
|
|
if attesters.len() >= limit {
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
attesters.push(*validator_index);
|
|
|
|
|
2020-05-06 11:42:56 +00:00
|
|
|
let mut attestation = self
|
|
|
|
.produce_unaggregated_attestation_for_block(
|
|
|
|
attestation_slot,
|
|
|
|
bc.index,
|
2020-08-26 09:24:55 +00:00
|
|
|
head_block_root.into(),
|
2020-05-06 11:42:56 +00:00
|
|
|
Cow::Borrowed(state),
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root,
|
2020-05-06 11:42:56 +00:00
|
|
|
)
|
2020-08-26 09:24:55 +00:00
|
|
|
.unwrap();
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
attestation.aggregation_bits.set(i, true).unwrap();
|
2020-05-06 11:42:56 +00:00
|
|
|
|
|
|
|
attestation.signature = {
|
2020-08-26 09:24:55 +00:00
|
|
|
let domain = self.spec.get_domain(
|
2020-05-06 11:42:56 +00:00
|
|
|
attestation.data.target.epoch,
|
|
|
|
Domain::BeaconAttester,
|
2021-08-30 06:41:31 +00:00
|
|
|
&fork,
|
2021-07-09 06:15:32 +00:00
|
|
|
state.genesis_validators_root(),
|
2020-05-06 11:42:56 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
let message = attestation.data.signing_root(domain);
|
|
|
|
|
2020-07-25 02:03:18 +00:00
|
|
|
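// Build a single-signer aggregate: start from the infinity point and add this validator's signature.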
let mut agg_sig = AggregateSignature::infinity();
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
agg_sig.add_assign(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self.validator_keypairs[*validator_index].sk.sign(message),
|
2020-08-26 09:24:55 +00:00
|
|
|
);
|
2020-05-06 11:42:56 +00:00
|
|
|
|
|
|
|
agg_sig
|
|
|
|
};
|
|
|
|
|
2020-06-18 09:11:03 +00:00
|
|
|
let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>(
|
|
|
|
&attestation.data,
|
|
|
|
committee_count,
|
|
|
|
&self.chain.spec,
|
|
|
|
)
|
2020-08-26 09:24:55 +00:00
|
|
|
.unwrap();
|
2020-06-18 09:11:03 +00:00
|
|
|
|
|
|
|
Some((attestation, subnet_id))
|
2019-08-29 01:34:25 +00:00
|
|
|
})
|
2022-12-13 09:57:26 +00:00
|
|
|
.collect::<Vec<_>>()
|
2020-05-06 11:42:56 +00:00
|
|
|
})
|
2022-12-13 09:57:26 +00:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
let attesters = attesters.into_inner();
|
|
|
|
if let Some(limit) = limit {
|
|
|
|
assert_eq!(
|
|
|
|
limit,
|
|
|
|
attesters.len(),
|
|
|
|
"failed to generate `limit` attestations"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
(attestations, attesters)
|
2020-05-06 11:42:56 +00:00
|
|
|
}
|
2019-08-29 01:34:25 +00:00
|
|
|
|
2021-07-15 00:52:02 +00:00
|
|
|
/// A list of sync messages for the given state.
|
|
|
|
pub fn make_sync_committee_messages(
|
|
|
|
&self,
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
head_block_root: Hash256,
|
|
|
|
message_slot: Slot,
|
|
|
|
relative_sync_committee: RelativeSyncCommittee,
|
|
|
|
) -> Vec<Vec<(SyncCommitteeMessage, usize)>> {
|
|
|
|
let sync_committee: Arc<SyncCommittee<E>> = match relative_sync_committee {
|
|
|
|
RelativeSyncCommittee::Current => state
|
|
|
|
.current_sync_committee()
|
|
|
|
.expect("should be called on altair beacon state")
|
|
|
|
.clone(),
|
|
|
|
RelativeSyncCommittee::Next => state
|
|
|
|
.next_sync_committee()
|
|
|
|
.expect("should be called on altair beacon state")
|
|
|
|
.clone(),
|
|
|
|
};
|
2021-08-30 06:41:31 +00:00
|
|
|
let fork = self
|
|
|
|
.spec
|
|
|
|
.fork_at_epoch(message_slot.epoch(E::slots_per_epoch()));
|
2021-07-15 00:52:02 +00:00
|
|
|
|
|
|
|
sync_committee
|
|
|
|
.pubkeys
|
|
|
|
.as_ref()
|
|
|
|
.chunks(E::sync_subcommittee_size())
|
|
|
|
.map(|subcommittee| {
|
|
|
|
subcommittee
|
|
|
|
.iter()
|
|
|
|
.enumerate()
|
|
|
|
.map(|(subcommittee_position, pubkey)| {
|
|
|
|
let validator_index = self
|
|
|
|
.chain
|
|
|
|
.validator_index(pubkey)
|
|
|
|
.expect("should find validator index")
|
|
|
|
.expect("pubkey should exist in the beacon chain");
|
|
|
|
|
|
|
|
let sync_message = SyncCommitteeMessage::new::<E>(
|
|
|
|
message_slot,
|
|
|
|
head_block_root,
|
|
|
|
validator_index as u64,
|
|
|
|
&self.validator_keypairs[validator_index].sk,
|
2021-08-30 06:41:31 +00:00
|
|
|
&fork,
|
2021-07-15 00:52:02 +00:00
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
(sync_message, subcommittee_position)
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
/// Deprecated: Use make_unaggregated_attestations() instead.
|
|
|
|
///
|
|
|
|
/// A list of attestations for each committee for the given slot.
|
|
|
|
///
|
|
|
|
/// The first layer of the Vec is organised per committee. For example, if the return value is
|
|
|
|
/// called `all_attestations`, then all attestations in `all_attestations[0]` will be for
|
|
|
|
/// committee 0, whilst all in `all_attestations[1]` will be for committee 1.
|
|
|
|
pub fn get_unaggregated_attestations(
|
2020-05-06 11:42:56 +00:00
|
|
|
&self,
|
|
|
|
attestation_strategy: &AttestationStrategy,
|
|
|
|
state: &BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-05-06 11:42:56 +00:00
|
|
|
head_block_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
attestation_slot: Slot,
|
|
|
|
) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
|
|
|
|
let validators: Vec<usize> = match attestation_strategy {
|
|
|
|
AttestationStrategy::AllValidators => self.get_all_validators(),
|
|
|
|
AttestationStrategy::SomeValidators(vals) => vals.clone(),
|
|
|
|
};
|
|
|
|
self.make_unaggregated_attestations(
|
|
|
|
&validators,
|
2020-05-06 11:42:56 +00:00
|
|
|
state,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root,
|
2020-08-26 09:24:55 +00:00
|
|
|
head_block_root.into(),
|
|
|
|
attestation_slot,
|
|
|
|
)
|
|
|
|
}
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
pub fn make_attestations(
|
|
|
|
&self,
|
|
|
|
attesting_validators: &[usize],
|
|
|
|
state: &BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
block_hash: SignedBeaconBlockHash,
|
|
|
|
slot: Slot,
|
|
|
|
) -> HarnessAttestations<E> {
|
2022-12-13 09:57:26 +00:00
|
|
|
self.make_attestations_with_limit(
|
2021-07-30 01:11:47 +00:00
|
|
|
attesting_validators,
|
|
|
|
state,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root,
|
|
|
|
block_hash,
|
|
|
|
slot,
|
2022-12-13 09:57:26 +00:00
|
|
|
None,
|
|
|
|
)
|
|
|
|
.0
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Produce exactly `limit` attestations.
|
|
|
|
///
|
|
|
|
/// Return attestations and vec of validator indices that attested.
|
|
|
|
pub fn make_attestations_with_limit(
|
|
|
|
&self,
|
|
|
|
attesting_validators: &[usize],
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
state_root: Hash256,
|
|
|
|
block_hash: SignedBeaconBlockHash,
|
|
|
|
slot: Slot,
|
|
|
|
limit: Option<usize>,
|
|
|
|
) -> (HarnessAttestations<E>, Vec<usize>) {
|
|
|
|
let (unaggregated_attestations, attesters) = self
|
|
|
|
.make_unaggregated_attestations_with_limit(
|
|
|
|
attesting_validators,
|
|
|
|
state,
|
|
|
|
state_root,
|
|
|
|
block_hash,
|
|
|
|
slot,
|
|
|
|
limit,
|
|
|
|
);
|
2021-08-30 06:41:31 +00:00
|
|
|
let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch()));
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> =
|
|
|
|
unaggregated_attestations
|
|
|
|
.iter()
|
|
|
|
.map(|committee_attestations| {
|
|
|
|
// If there are any attestations in this committee, create an aggregate.
|
|
|
|
if let Some((attestation, _)) = committee_attestations.first() {
|
|
|
|
let bc = state
|
|
|
|
.get_beacon_committee(attestation.data.slot, attestation.data.index)
|
|
|
|
.unwrap();
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
// Find an aggregator if one exists. Return `None` if there are no
|
|
|
|
// aggregators.
|
|
|
|
let aggregator_index = bc
|
|
|
|
.committee
|
|
|
|
.iter()
|
|
|
|
.find(|&validator_index| {
|
2022-12-13 09:57:26 +00:00
|
|
|
if !attesters.contains(validator_index) {
|
2021-07-09 06:15:32 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
let selection_proof = SelectionProof::new::<E>(
|
2021-08-30 06:41:31 +00:00
|
|
|
slot,
|
2021-07-09 06:15:32 +00:00
|
|
|
&self.validator_keypairs[*validator_index].sk,
|
2021-08-30 06:41:31 +00:00
|
|
|
&fork,
|
2021-07-09 06:15:32 +00:00
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
selection_proof
|
|
|
|
.is_aggregator(bc.committee.len(), &self.spec)
|
|
|
|
.unwrap_or(false)
|
2020-05-06 11:42:56 +00:00
|
|
|
})
|
2021-07-09 06:15:32 +00:00
|
|
|
.copied()?;
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
// If the chain is able to produce an aggregate, use that. Otherwise, build an
|
|
|
|
// aggregate locally.
|
|
|
|
let aggregate = self
|
|
|
|
.chain
|
|
|
|
.get_aggregated_attestation(&attestation.data)
|
2022-04-13 03:54:42 +00:00
|
|
|
.unwrap()
|
2021-07-09 06:15:32 +00:00
|
|
|
.unwrap_or_else(|| {
|
|
|
|
committee_attestations.iter().skip(1).fold(
|
|
|
|
attestation.clone(),
|
|
|
|
|mut agg, (att, _)| {
|
|
|
|
agg.aggregate(att);
|
|
|
|
agg
|
|
|
|
},
|
|
|
|
)
|
|
|
|
});
|
|
|
|
|
|
|
|
let signed_aggregate = SignedAggregateAndProof::from_aggregate(
|
|
|
|
aggregator_index as u64,
|
|
|
|
aggregate,
|
|
|
|
None,
|
|
|
|
&self.validator_keypairs[aggregator_index].sk,
|
2021-08-30 06:41:31 +00:00
|
|
|
&fork,
|
2021-07-09 06:15:32 +00:00
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
Some(signed_aggregate)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
2020-08-26 09:24:55 +00:00
|
|
|
|
2022-12-13 09:57:26 +00:00
|
|
|
(
|
|
|
|
unaggregated_attestations
|
|
|
|
.into_iter()
|
|
|
|
.zip(aggregated_attestations)
|
|
|
|
.collect(),
|
|
|
|
attesters,
|
|
|
|
)
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
v0.12 fork choice update (#1229)
* Incomplete scraps
* Add progress on new fork choice impl
* Further progress
* First complete compiling version
* Remove chain reference
* Add new lmd_ghost crate
* Start integrating into beacon chain
* Update `milagro_bls` to new release (#1183)
* Update milagro_bls to new release
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Tidy up fake cryptos
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* move SecretHash to bls and put plaintext back
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Rough beacon chain impl working
* Remove fork_choice_2
* Remove checkpoint manager
* Half finished ssz impl
* Add missed file
* Add persistence
* Tidy, fix some compile errors
* Remove RwLock from ProtoArrayForkChoice
* Fix store-based compile errors
* Add comments, tidy
* Move function out of ForkChoice struct
* Start testing
* More testing
* Fix compile error
* Tidy beacon_chain::fork_choice
* Queue attestations from the current slot
* Allow fork choice to handle prior-to-genesis start
* Improve error granularity
* Test attestation dequeuing
* Process attestations during block
* Store target root in fork choice
* Move fork choice verification into new crate
* Update tests
* Consensus updates for v0.12 (#1228)
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Fix typo found in review
* Add `Block` struct to ProtoArray
* Start fixing get_ancestor
* Add rough progress on testing
* Get fork choice tests working
* Progress with testing
* Fix partialeq impl
* Move slot clock from fc_store
* Improve testing
* Add testing for best justified
* Add clone back to SystemTimeSlotClock
* Add balances test
* Start adding balances cache again
* Wire-in balances cache
* Improve tests
* Remove commented-out tests
* Remove beacon_chain::ForkChoice
* Rename crates
* Update wider codebase to new fork_choice layout
* Move advance_slot in test harness
* Tidy ForkChoice::update_time
* Fix verification tests
* Fix compile error with iter::once
* Fix fork choice tests
* Ensure block attestations are processed
* Fix failing beacon_chain tests
* Add first invalid block check
* Add finalized block check
* Progress with testing, new store builder
* Add fixes to get_ancestor
* Fix old genesis justification test
* Fix remaining fork choice tests
* Change root iteration method
* Move on_verified_block
* Remove unused method
* Start adding attestation verification tests
* Add invalid ffg target test
* Add target epoch test
* Add queued attestation test
* Remove old fork choice verification tests
* Tidy, add test
* Move fork choice lock drop
* Rename BeaconForkChoiceStore
* Add comments, tidy BeaconForkChoiceStore
* Update metrics, rename fork_choice_store.rs
* Remove genesis_block_root from ForkChoice
* Tidy
* Update fork_choice comments
* Tidy, add comments
* Tidy, simplify ForkChoice, fix compile issue
* Tidy, removed dead file
* Increase http request timeout
* Fix failing rest_api test
* Set HTTP timeout back to 5s
* Apply fix to get_ancestor
* Address Michael's comments
* Fix typo
* Revert "Fix broken attestation verification test"
This reverts commit 722cdc903b12611de27916a57eeecfa3224f2279.
Co-authored-by: Kirk Baird <baird.k@outlook.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
2020-06-17 01:10:22 +00:00
|
|
|
|
2021-07-15 00:52:02 +00:00
|
|
|
pub fn make_sync_contributions(
|
|
|
|
&self,
|
|
|
|
state: &BeaconState<E>,
|
|
|
|
block_hash: Hash256,
|
|
|
|
slot: Slot,
|
|
|
|
relative_sync_committee: RelativeSyncCommittee,
|
|
|
|
) -> HarnessSyncContributions<E> {
|
|
|
|
let sync_messages =
|
2021-07-30 01:11:47 +00:00
|
|
|
self.make_sync_committee_messages(state, block_hash, slot, relative_sync_committee);
|
2021-07-15 00:52:02 +00:00
|
|
|
|
|
|
|
let sync_contributions: Vec<Option<SignedContributionAndProof<E>>> = sync_messages
|
|
|
|
.iter()
|
|
|
|
.enumerate()
|
|
|
|
.map(|(subnet_id, committee_messages)| {
|
|
|
|
// If there are any sync messages in this committee, create an aggregate.
|
|
|
|
if let Some((sync_message, subcommittee_position)) = committee_messages.first() {
|
|
|
|
let sync_committee: Arc<SyncCommittee<E>> = state
|
|
|
|
.current_sync_committee()
|
|
|
|
.expect("should be called on altair beacon state")
|
|
|
|
.clone();
|
|
|
|
|
|
|
|
let aggregator_index = sync_committee
|
|
|
|
.get_subcommittee_pubkeys(subnet_id)
|
|
|
|
.unwrap()
|
|
|
|
.iter()
|
|
|
|
.find_map(|pubkey| {
|
|
|
|
let validator_index = self
|
|
|
|
.chain
|
|
|
|
.validator_index(pubkey)
|
|
|
|
.expect("should find validator index")
|
|
|
|
.expect("pubkey should exist in the beacon chain");
|
|
|
|
|
|
|
|
let selection_proof = SyncSelectionProof::new::<E>(
|
|
|
|
slot,
|
|
|
|
subnet_id as u64,
|
|
|
|
&self.validator_keypairs[validator_index].sk,
|
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
selection_proof
|
|
|
|
.is_aggregator::<E>()
|
|
|
|
.expect("should determine aggregator")
|
2022-09-23 03:52:46 +00:00
|
|
|
.then_some(validator_index)
|
2021-07-15 00:52:02 +00:00
|
|
|
})?;
|
|
|
|
|
|
|
|
let default = SyncCommitteeContribution::from_message(
|
2021-07-30 01:11:47 +00:00
|
|
|
sync_message,
|
2021-07-15 00:52:02 +00:00
|
|
|
subnet_id as u64,
|
|
|
|
*subcommittee_position,
|
|
|
|
)
|
|
|
|
.expect("should derive sync contribution");
|
|
|
|
|
|
|
|
let aggregate = committee_messages.iter().skip(1).fold(
|
|
|
|
default,
|
|
|
|
|mut agg, (sig, position)| {
|
|
|
|
let contribution = SyncCommitteeContribution::from_message(
|
|
|
|
sig,
|
|
|
|
subnet_id as u64,
|
|
|
|
*position,
|
|
|
|
)
|
|
|
|
.expect("should derive sync contribution");
|
|
|
|
agg.aggregate(&contribution);
|
|
|
|
agg
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
let signed_aggregate = SignedContributionAndProof::from_aggregate(
|
|
|
|
aggregator_index as u64,
|
|
|
|
aggregate,
|
|
|
|
None,
|
|
|
|
&self.validator_keypairs[aggregator_index].sk,
|
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
Some(signed_aggregate)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
sync_messages.into_iter().zip(sync_contributions).collect()
|
|
|
|
}
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
pub fn make_attester_slashing(&self, validator_indices: Vec<u64>) -> AttesterSlashing<E> {
|
Refactor op pool for speed and correctness (#3312)
## Proposed Changes
This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12).
Attestation packing is sped up by removing several inefficiencies:
- No more recalculation of `attesting_indices` during packing.
- No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices. See `RewardCache`.
- No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra).
- No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release).
So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms.
Verification of attester slashings, proposer slashings and voluntary exits is fixed by:
- Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message.
- Storing the `SigVerifiedOp` on disk rather than the raw operation. This allows us to continue tracking the fork versions after a reboot.
This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f.
## Additional Info
The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, on the logic that attestations are most valuable in the few slots after they're observed and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s.
This PR contains a runtime killswitch `--paranoid-block-proposal`, which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct, this flag could be useful in the event of an unforeseen emergency.
Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised.
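As a rough illustration of the `SigVerifiedOp` idea (the field and method names below are hypothetical stand-ins, not the real Lighthouse types): the fork version(s) used for the original signature check are carried alongside the message, so re-checking at packing time is a cheap comparison against the head rather than another signature verification.
```
/// Hypothetical stand-in types for the sketch; not Lighthouse definitions.
type ForkVersion = [u8; 4];

struct VoluntaryExit {
    validator_index: u64,
}

/// An operation whose signature has been verified once, together with the fork
/// version(s) that were used for that verification.
struct SigVerifiedOp<T> {
    op: T,
    verified_against: Vec<ForkVersion>,
}

impl<T> SigVerifiedOp<T> {
    /// Cheap re-check at packing time: the op is still valid for inclusion if the
    /// head's fork versions for the relevant epochs match the ones used when the
    /// signature was originally verified.
    fn signature_still_valid(&self, head_fork_versions: &[ForkVersion]) -> bool {
        self.verified_against.len() == head_fork_versions.len()
            && self
                .verified_against
                .iter()
                .zip(head_fork_versions)
                .all(|(stored, head)| stored == head)
    }
}

fn main() {
    let exit = VoluntaryExit { validator_index: 7 };
    // Imagine the signature was checked against fork version 0x02000000.
    let verified = SigVerifiedOp {
        op: exit,
        verified_against: vec![[2, 0, 0, 0]],
    };

    // At proposal time, compare against the head state's fork version(s).
    assert!(verified.signature_still_valid(&[[2, 0, 0, 0]]));
    assert!(!verified.signature_still_valid(&[[3, 0, 0, 0]]));
    println!("op from validator: {}", verified.op.validator_index);
}
```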
2022-08-29 09:10:26 +00:00
|
|
|
self.make_attester_slashing_with_epochs(validator_indices, None, None, None, None)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn make_attester_slashing_with_epochs(
|
|
|
|
&self,
|
|
|
|
validator_indices: Vec<u64>,
|
|
|
|
source1: Option<Epoch>,
|
|
|
|
target1: Option<Epoch>,
|
|
|
|
source2: Option<Epoch>,
|
|
|
|
target2: Option<Epoch>,
|
|
|
|
) -> AttesterSlashing<E> {
|
|
|
|
let fork = self.chain.canonical_head.cached_head().head_fork();
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
let mut attestation_1 = IndexedAttestation {
|
|
|
|
attesting_indices: VariableList::new(validator_indices).unwrap(),
|
|
|
|
data: AttestationData {
|
|
|
|
slot: Slot::new(0),
|
|
|
|
index: 0,
|
|
|
|
beacon_block_root: Hash256::zero(),
|
|
|
|
target: Checkpoint {
|
|
|
|
root: Hash256::zero(),
|
Refactor op pool for speed and correctness (#3312)
2022-08-29 09:10:26 +00:00
|
|
|
epoch: target1.unwrap_or(fork.epoch),
|
2020-09-29 03:46:54 +00:00
|
|
|
},
|
|
|
|
source: Checkpoint {
|
|
|
|
root: Hash256::zero(),
|
Refactor op pool for speed and correctness (#3312)
2022-08-29 09:10:26 +00:00
|
|
|
epoch: source1.unwrap_or(Epoch::new(0)),
|
2020-09-29 03:46:54 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
signature: AggregateSignature::infinity(),
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut attestation_2 = attestation_1.clone();
|
|
|
|
attestation_2.data.index += 1;
|
Refactor op pool for speed and correctness (#3312)
2022-08-29 09:10:26 +00:00
|
|
|
attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0));
|
|
|
|
attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch);
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
for attestation in &mut [&mut attestation_1, &mut attestation_2] {
|
|
|
|
for &i in &attestation.attesting_indices {
|
2020-10-19 05:58:39 +00:00
|
|
|
let sk = &self.validator_keypairs[i as usize].sk;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
let genesis_validators_root = self.chain.genesis_validators_root;
|
|
|
|
|
|
|
|
let domain = self.chain.spec.get_domain(
|
|
|
|
attestation.data.target.epoch,
|
|
|
|
Domain::BeaconAttester,
|
|
|
|
&fork,
|
|
|
|
genesis_validators_root,
|
|
|
|
);
|
|
|
|
let message = attestation.data.signing_root(domain);
|
|
|
|
|
|
|
|
attestation.signature.add_assign(&sk.sign(message));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
AttesterSlashing {
|
|
|
|
attestation_1,
|
|
|
|
attestation_2,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
pub fn make_attester_slashing_different_indices(
|
|
|
|
&self,
|
|
|
|
validator_indices_1: Vec<u64>,
|
|
|
|
validator_indices_2: Vec<u64>,
|
|
|
|
) -> AttesterSlashing<E> {
|
|
|
|
let data = AttestationData {
|
|
|
|
slot: Slot::new(0),
|
|
|
|
index: 0,
|
|
|
|
beacon_block_root: Hash256::zero(),
|
|
|
|
target: Checkpoint {
|
|
|
|
root: Hash256::zero(),
|
|
|
|
epoch: Epoch::new(0),
|
|
|
|
},
|
|
|
|
source: Checkpoint {
|
|
|
|
root: Hash256::zero(),
|
|
|
|
epoch: Epoch::new(0),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut attestation_1 = IndexedAttestation {
|
|
|
|
attesting_indices: VariableList::new(validator_indices_1).unwrap(),
|
|
|
|
data: data.clone(),
|
|
|
|
signature: AggregateSignature::infinity(),
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut attestation_2 = IndexedAttestation {
|
|
|
|
attesting_indices: VariableList::new(validator_indices_2).unwrap(),
|
|
|
|
data,
|
|
|
|
signature: AggregateSignature::infinity(),
|
|
|
|
};
|
|
|
|
|
|
|
|
attestation_2.data.index += 1;
|
|
|
|
|
Use async code when interacting with EL (#3244)
2022-07-03 05:36:50 +00:00
|
|
|
let fork = self.chain.canonical_head.cached_head().head_fork();
|
2021-07-09 06:15:32 +00:00
|
|
|
for attestation in &mut [&mut attestation_1, &mut attestation_2] {
|
|
|
|
for &i in &attestation.attesting_indices {
|
|
|
|
let sk = &self.validator_keypairs[i as usize].sk;
|
|
|
|
|
|
|
|
let genesis_validators_root = self.chain.genesis_validators_root;
|
|
|
|
|
|
|
|
let domain = self.chain.spec.get_domain(
|
|
|
|
attestation.data.target.epoch,
|
|
|
|
Domain::BeaconAttester,
|
|
|
|
&fork,
|
|
|
|
genesis_validators_root,
|
|
|
|
);
|
|
|
|
let message = attestation.data.signing_root(domain);
|
|
|
|
|
|
|
|
attestation.signature.add_assign(&sk.sign(message));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
AttesterSlashing {
|
|
|
|
attestation_1,
|
|
|
|
attestation_2,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing {
|
Refactor op pool for speed and correctness (#3312)
2022-08-29 09:10:26 +00:00
|
|
|
self.make_proposer_slashing_at_slot(validator_index, None)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn make_proposer_slashing_at_slot(
|
|
|
|
&self,
|
|
|
|
validator_index: u64,
|
|
|
|
slot_override: Option<Slot>,
|
|
|
|
) -> ProposerSlashing {
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
4. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values doesn't need to wrapped in an `Option`. This was fairly simple, it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test; it's not as efficient, but the cost is hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
let mut block_header_1 = self.chain.head_beacon_block().message().block_header();
|
2020-09-29 03:46:54 +00:00
|
|
|
block_header_1.proposer_index = validator_index;
|
Refactor op pool for speed and correctness (#3312)
## Proposed Changes
This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12).
Attestation packing is sped up by removing several inefficiencies:
- No more recalculation of `attesting_indices` during packing.
- No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices (see `RewardCache`, and the sketch after this list).
- No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra).
- No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release).
So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms.
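Roughly, the bitfield idea looks like this (purely illustrative; this is not the real `RewardCache`): packing only needs a yes/no answer per validator, which can be precomputed once from the participation flags and then read in O(1):
```
/// Hypothetical stand-in for per-validator epoch participation flags.
type ParticipationFlags = u8;

/// Illustrative cache: one bool per validator recording whether the flag we
/// care about is already set, so packing never re-reads the full flags.
struct RewardBitfield {
    bits: Vec<bool>,
}

impl RewardBitfield {
    fn build(flags: &[ParticipationFlags], flag_index: u8) -> Self {
        let mask = 1u8 << flag_index;
        Self {
            bits: flags.iter().map(|f| f & mask != 0).collect(),
        }
    }

    /// O(1) lookup during attestation packing.
    fn is_set(&self, validator_index: usize) -> bool {
        self.bits.get(validator_index).copied().unwrap_or(false)
    }
}

fn main() {
    let flags: Vec<ParticipationFlags> = vec![0b001, 0b000, 0b011];
    let cache = RewardBitfield::build(&flags, 0);
    assert!(cache.is_set(0) && !cache.is_set(1) && cache.is_set(2));
}
```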
Verification of attester slashings, proposer slashings and voluntary exits is fixed by:
- Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message.
- Storing the `SigVerifiedOp` on disk rather than the raw operation. This allows us to continue tracking the fork versions after a reboot.
This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f.
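The fork-version tracking can be pictured like this (hypothetical types, not Lighthouse's actual `SigVerifiedOp`): the wrapper records the fork version(s) the signature was checked under, so re-validation against the head becomes a cheap equality check rather than another signature verification:
```
type ForkVersion = [u8; 4];

/// Illustrative wrapper: the operation plus the fork version(s) it was
/// signature-verified under.
struct SigVerifiedOp<T> {
    op: T,
    verified_against: Vec<ForkVersion>,
}

impl<T> SigVerifiedOp<T> {
    fn new(op: T, verified_against: Vec<ForkVersion>) -> Self {
        Self { op, verified_against }
    }

    /// The signature is still known-good as long as the head state agrees on
    /// the fork version(s) relevant to this message.
    fn still_valid(&self, head_fork_versions: &[ForkVersion]) -> bool {
        self.verified_against.len() == head_fork_versions.len()
            && self
                .verified_against
                .iter()
                .zip(head_fork_versions)
                .all(|(a, b)| a == b)
    }
}

fn main() {
    let op = SigVerifiedOp::new("voluntary exit payload", vec![[2, 0, 0, 0]]);
    assert!(op.still_valid(&[[2, 0, 0, 0]]));
    assert!(!op.still_valid(&[[3, 0, 0, 0]]));
}
```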
## Additional Info
The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, by the logic that attestations are most valuable in the few slots after they're observed, and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s.
This PR contains a runtime killswitch `--paranoid-block-proposal` which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct, this flag could be useful in the event of an unforeseen emergency.
Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised.
2022-08-29 09:10:26 +00:00
|
|
|
if let Some(slot) = slot_override {
|
|
|
|
block_header_1.slot = slot;
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
let mut block_header_2 = block_header_1.clone();
|
|
|
|
block_header_2.state_root = Hash256::zero();
|
|
|
|
|
2020-10-19 05:58:39 +00:00
|
|
|
let sk = &self.validator_keypairs[validator_index as usize].sk;
|
2022-07-03 05:36:50 +00:00
|
|
|
let fork = self.chain.canonical_head.cached_head().head_fork();
|
2020-09-29 03:46:54 +00:00
|
|
|
let genesis_validators_root = self.chain.genesis_validators_root;
|
|
|
|
|
|
|
|
let mut signed_block_headers = vec![block_header_1, block_header_2]
|
|
|
|
.into_iter()
|
|
|
|
.map(|block_header| {
|
2021-07-30 01:11:47 +00:00
|
|
|
block_header.sign::<E>(sk, &fork, genesis_validators_root, &self.chain.spec)
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
ProposerSlashing {
|
|
|
|
signed_header_2: signed_block_headers.remove(1),
|
|
|
|
signed_header_1: signed_block_headers.remove(0),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit {
|
2020-10-19 05:58:39 +00:00
|
|
|
let sk = &self.validator_keypairs[validator_index as usize].sk;
|
2022-07-03 05:36:50 +00:00
|
|
|
let fork = self.chain.canonical_head.cached_head().head_fork();
|
2020-09-29 03:46:54 +00:00
|
|
|
let genesis_validators_root = self.chain.genesis_validators_root;
|
|
|
|
|
|
|
|
VoluntaryExit {
|
|
|
|
epoch,
|
|
|
|
validator_index,
|
|
|
|
}
|
|
|
|
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
|
|
|
|
}
|
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
pub fn add_voluntary_exit(
|
|
|
|
&self,
|
|
|
|
block: &mut BeaconBlock<E>,
|
|
|
|
validator_index: u64,
|
|
|
|
epoch: Epoch,
|
|
|
|
) {
|
|
|
|
let exit = self.make_voluntary_exit(validator_index, epoch);
|
|
|
|
block.body_mut().voluntary_exits_mut().push(exit).unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Create a new block, apply `block_modifier` to it, sign it and return it.
|
|
|
|
///
|
|
|
|
/// The state returned is a pre-block state at the same slot as the produced block.
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn make_block_with_modifier(
|
2021-07-09 06:15:32 +00:00
|
|
|
&self,
|
|
|
|
state: BeaconState<E>,
|
|
|
|
slot: Slot,
|
|
|
|
block_modifier: impl FnOnce(&mut BeaconBlock<E>),
|
|
|
|
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
|
|
|
|
assert_ne!(slot, 0, "can't produce a block at slot 0");
|
|
|
|
assert!(slot >= state.slot());
|
|
|
|
|
2022-07-03 05:36:50 +00:00
|
|
|
let (block, state) = self.make_block_return_pre_state(state, slot).await;
|
2021-07-09 06:15:32 +00:00
|
|
|
let (mut block, _) = block.deconstruct();
|
|
|
|
|
|
|
|
block_modifier(&mut block);
|
|
|
|
|
|
|
|
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
|
|
|
|
|
|
|
|
let signed_block = block.sign(
|
2022-12-16 04:04:00 +00:00
|
|
|
&self.validator_keypairs[proposer_index].sk,
|
2021-07-09 06:15:32 +00:00
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
|
|
|
&self.spec,
|
|
|
|
);
|
|
|
|
(signed_block, state)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn make_deposits<'a>(
|
|
|
|
&self,
|
|
|
|
state: &'a mut BeaconState<E>,
|
|
|
|
num_deposits: usize,
|
|
|
|
invalid_pubkey: Option<PublicKeyBytes>,
|
|
|
|
invalid_signature: Option<SignatureBytes>,
|
|
|
|
) -> (Vec<Deposit>, &'a mut BeaconState<E>) {
|
|
|
|
let mut datas = vec![];
|
|
|
|
|
|
|
|
for _ in 0..num_deposits {
|
|
|
|
let keypair = Keypair::random();
|
|
|
|
let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone());
|
|
|
|
|
|
|
|
let mut data = DepositData {
|
|
|
|
pubkey: pubkeybytes,
|
|
|
|
withdrawal_credentials: Hash256::from_slice(
|
|
|
|
&get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte)
|
|
|
|
[..],
|
|
|
|
),
|
|
|
|
amount: self.spec.min_deposit_amount,
|
|
|
|
signature: SignatureBytes::empty(),
|
|
|
|
};
|
|
|
|
|
|
|
|
data.signature = data.create_signature(&keypair.sk, &self.spec);
|
|
|
|
|
|
|
|
if let Some(invalid_pubkey) = invalid_pubkey {
|
|
|
|
data.pubkey = invalid_pubkey;
|
|
|
|
}
|
|
|
|
if let Some(invalid_signature) = invalid_signature.clone() {
|
|
|
|
data.signature = invalid_signature;
|
|
|
|
}
|
|
|
|
datas.push(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Vector containing all leaves
|
|
|
|
let leaves = datas
|
|
|
|
.iter()
|
|
|
|
.map(|data| data.tree_hash_root())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
// Building a VarList from leaves
|
|
|
|
let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone());
|
|
|
|
|
|
|
|
// Setting the deposit_root to be the tree_hash_root of the VarList
|
|
|
|
state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root();
|
|
|
|
state.eth1_data_mut().deposit_count = num_deposits as u64;
|
|
|
|
*state.eth1_deposit_index_mut() = 0;
|
|
|
|
|
|
|
|
// Building the merkle tree used for generating proofs
|
|
|
|
let tree = MerkleTree::create(&leaves[..], self.spec.deposit_contract_tree_depth as usize);
|
|
|
|
|
|
|
|
// Building proofs
|
|
|
|
let mut proofs = vec![];
|
|
|
|
for i in 0..leaves.len() {
|
2022-10-30 04:04:24 +00:00
|
|
|
let (_, mut proof) = tree
|
|
|
|
.generate_proof(i, self.spec.deposit_contract_tree_depth as usize)
|
|
|
|
.expect("should generate proof");
|
2021-07-09 06:15:32 +00:00
|
|
|
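// Mix the deposit count into the final proof element, mirroring the spec's `mix_in_length` over the deposit tree root.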
proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64)));
|
|
|
|
proofs.push(proof);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Building deposits
|
|
|
|
let deposits = datas
|
|
|
|
.into_par_iter()
|
|
|
|
.zip(proofs.into_par_iter())
|
|
|
|
.map(|(data, proof)| (data, proof.into()))
|
|
|
|
.map(|(data, proof)| Deposit { proof, data })
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
// Return the deposits along with the modified state.
|
|
|
|
(deposits, state)
|
|
|
|
}
|
|
|
|
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn process_block(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
|
|
|
slot: Slot,
|
2022-09-23 03:52:42 +00:00
|
|
|
block_root: Hash256,
|
2020-10-19 05:58:39 +00:00
|
|
|
block: SignedBeaconBlock<E>,
|
|
|
|
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
|
|
|
self.set_current_slot(slot);
|
2022-07-25 23:53:26 +00:00
|
|
|
let block_hash: SignedBeaconBlockHash = self
|
|
|
|
.chain
|
2022-11-29 08:19:27 +00:00
|
|
|
.process_block(
|
|
|
|
block_root,
|
|
|
|
Arc::new(block),
|
|
|
|
CountUnrealized::True,
|
|
|
|
NotifyExecutionLayer::Yes,
|
|
|
|
)
|
2022-07-25 23:53:26 +00:00
|
|
|
.await?
|
|
|
|
.into();
|
2022-07-28 13:57:09 +00:00
|
|
|
self.chain.recompute_head_at_current_slot().await;
|
2020-10-19 05:58:39 +00:00
|
|
|
Ok(block_hash)
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
v0.12 fork choice update (#1229)
* Incomplete scraps
* Add progress on new fork choice impl
* Further progress
* First complete compiling version
* Remove chain reference
* Add new lmd_ghost crate
* Start integrating into beacon chain
* Update `milagro_bls` to new release (#1183)
* Update milagro_bls to new release
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Tidy up fake cryptos
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* move SecretHash to bls and put plaintext back
Signed-off-by: Kirk Baird <baird.k@outlook.com>
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Rough beacon chain impl working
* Remove fork_choice_2
* Remove checkpoint manager
* Half finished ssz impl
* Add missed file
* Add persistence
* Tidy, fix some compile errors
* Remove RwLock from ProtoArrayForkChoice
* Fix store-based compile errors
* Add comments, tidy
* Move function out of ForkChoice struct
* Start testing
* More testing
* Fix compile error
* Tidy beacon_chain::fork_choice
* Queue attestations from the current slot
* Allow fork choice to handle prior-to-genesis start
* Improve error granularity
* Test attestation dequeuing
* Process attestations during block
* Store target root in fork choice
* Move fork choice verification into new crate
* Update tests
* Consensus updates for v0.12 (#1228)
* Update state processing for v0.12
* Fix EF test runners for v0.12
* Fix some tests
* Fix broken attestation verification test
* More test fixes
* Fix typo found in review
* Add `Block` struct to ProtoArray
* Start fixing get_ancestor
* Add rough progress on testing
* Get fork choice tests working
* Progress with testing
* Fix partialeq impl
* Move slot clock from fc_store
* Improve testing
* Add testing for best justified
* Add clone back to SystemTimeSlotClock
* Add balances test
* Start adding balances cache again
* Wire-in balances cache
* Improve tests
* Remove commented-out tests
* Remove beacon_chain::ForkChoice
* Rename crates
* Update wider codebase to new fork_choice layout
* Move advance_slot in test harness
* Tidy ForkChoice::update_time
* Fix verification tests
* Fix compile error with iter::once
* Fix fork choice tests
* Ensure block attestations are processed
* Fix failing beacon_chain tests
* Add first invalid block check
* Add finalized block check
* Progress with testing, new store builder
* Add fixes to get_ancestor
* Fix old genesis justification test
* Fix remaining fork choice tests
* Change root iteration method
* Move on_verified_block
* Remove unused method
* Start adding attestation verification tests
* Add invalid ffg target test
* Add target epoch test
* Add queued attestation test
* Remove old fork choice verification tests
* Tidy, add test
* Move fork choice lock drop
* Rename BeaconForkChoiceStore
* Add comments, tidy BeaconForkChoiceStore
* Update metrics, rename fork_choice_store.rs
* Remove genesis_block_root from ForkChoice
* Tidy
* Update fork_choice comments
* Tidy, add comments
* Tidy, simplify ForkChoice, fix compile issue
* Tidy, removed dead file
* Increase http request timeout
* Fix failing rest_api test
* Set HTTP timeout back to 5s
* Apply fix to get_ancestor
* Address Michael's comments
* Fix typo
* Revert "Fix broken attestation verification test"
This reverts commit 722cdc903b12611de27916a57eeecfa3224f2279.
Co-authored-by: Kirk Baird <baird.k@outlook.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
2020-06-17 01:10:22 +00:00
|
|
|
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn process_block_result(
|
2020-10-01 01:41:58 +00:00
|
|
|
&self,
|
|
|
|
block: SignedBeaconBlock<E>,
|
|
|
|
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
|
2022-07-25 23:53:26 +00:00
|
|
|
let block_hash: SignedBeaconBlockHash = self
|
|
|
|
.chain
|
2022-09-23 03:52:42 +00:00
|
|
|
.process_block(
|
|
|
|
block.canonical_root(),
|
|
|
|
Arc::new(block),
|
|
|
|
CountUnrealized::True,
|
2022-11-29 08:19:27 +00:00
|
|
|
NotifyExecutionLayer::Yes,
|
2022-09-23 03:52:42 +00:00
|
|
|
)
|
2022-07-25 23:53:26 +00:00
|
|
|
.await?
|
|
|
|
.into();
|
2022-07-28 13:57:09 +00:00
|
|
|
self.chain.recompute_head_at_current_slot().await;
|
2020-10-01 01:41:58 +00:00
|
|
|
Ok(block_hash)
|
|
|
|
}
|
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
pub fn process_attestations(&self, attestations: HarnessAttestations<E>) {
|
Batch BLS verification for attestations (#2399)
## Issue Addressed
NA
## Proposed Changes
Adds the ability to verify batches of aggregated/unaggregated attestations from the network.
When the `BeaconProcessor` finds there are messages in the aggregated or unaggregated attestation queues, it will first check the length of the queue:
- `== 1` verify the attestation individually.
- `>= 2` take up to 64 of those attestations and verify them in a batch.
Notably, we only perform batch verification if the queue has a backlog. We don't apply any artificial delays to attestations to try and force them into batches.
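A rough sketch of that queue policy (placeholder types, not the actual `BeaconProcessor` queues): a lone queued attestation is verified on its own, while a backlog is drained up to 64 at a time and handed to the batch verifier:
```
use std::collections::VecDeque;

/// Placeholder for a queued, not-yet-verified attestation.
struct QueuedAttestation(u32);

const MAX_BATCH: usize = 64;

/// Returns the attestations to verify next; the caller batch-verifies when
/// more than one is returned.
fn drain_for_verification(queue: &mut VecDeque<QueuedAttestation>) -> Vec<QueuedAttestation> {
    match queue.len() {
        0 => Vec::new(),
        1 => vec![queue.pop_front().unwrap()],
        _ => {
            let n = queue.len().min(MAX_BATCH);
            queue.drain(..n).collect()
        }
    }
}

fn main() {
    let mut queue: VecDeque<_> = (0..100).map(QueuedAttestation).collect();
    assert_eq!(drain_for_verification(&mut queue).len(), 64);
    assert_eq!(queue.len(), 36);
}
```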
### Batching Details
To assist with implementing batches we modify `beacon_chain::attestation_verification` to have two distinct categories for attestations:
- *Indexed* attestations: those which have passed initial validation and were valid enough for us to derive an `IndexedAttestation`.
- *Verified* attestations: those attestations which were indexed *and also* passed signature verification. These are well-formed, interesting messages which were signed by validators.
The batching functions accept `n` attestations and then return `n` attestation verification `Result`s, where those `Result`s can be any combination of `Ok` or `Err`. In other words, we attempt to verify as many attestations as possible and return specific per-attestation results so peer scores can be updated, if required.
When we batch verify attestations, we first try to map all those attestations to *indexed* attestations. If any of those attestations were able to be indexed, we then perform batch BLS verification on those indexed attestations. If the batch verification succeeds, we convert them into *verified* attestations, disabling individual signature checking. If the batch fails, we convert to verified attestations with individual signature checking enabled.
Ultimately, we optimistically try to do a batch verification of attestation signatures and fall back to individual verification if it fails. This opens an attack vector for "poisoning" the attestations and causing us to waste a batch verification. I argue that peer scoring should do a good-enough job of defending against this, and the typical-case gains massively outweigh the worst-case losses.
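A toy version of that optimistic flow (the signature checks are stubs, not real BLS): attempt the all-or-nothing batch check first and only fall back to per-attestation verification if it fails, still producing one `Result` per input:
```
/// Stub for an indexed attestation; `sig_ok` stands in for a real signature.
struct Indexed {
    sig_ok: bool,
}

/// Stub for a single aggregate BLS check over the whole batch.
fn batch_signatures_valid(atts: &[Indexed]) -> bool {
    atts.iter().all(|a| a.sig_ok)
}

fn verify_individually(att: &Indexed) -> Result<(), ()> {
    if att.sig_ok {
        Ok(())
    } else {
        Err(())
    }
}

/// Optimistically batch-verify; on failure, fall back to individual checks so
/// every input still gets its own result (e.g. for peer scoring).
fn verify(atts: &[Indexed]) -> Vec<Result<(), ()>> {
    if batch_signatures_valid(atts) {
        atts.iter().map(|_| Ok(())).collect()
    } else {
        atts.iter().map(verify_individually).collect()
    }
}

fn main() {
    let results = verify(&[Indexed { sig_ok: true }, Indexed { sig_ok: false }]);
    assert_eq!(results, vec![Ok(()), Err(())]);
}
```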
## Additional Info
Before this PR, attestation verification took the attestations by value (instead of by reference). It turns out that this was unnecessary and, in my opinion, resulted in some undesirable ergonomics (e.g., we had to pass the attestation back in the `Err` variant to avoid clones). In this PR I've modified attestation verification so that it now takes a reference.
I refactored the `beacon_chain/tests/attestation_verification.rs` tests so they use a builder-esque "tester" struct instead of a weird macro. It made it easier for me to test individual/batch with the same set of tests and I think it was a nice tidy-up. Notably, I did this last to try and make sure my new refactors to *actual* production code would pass under the existing test suite.
2021-09-22 08:49:41 +00:00
|
|
|
let num_validators = self.validator_keypairs.len();
|
|
|
|
let mut unaggregated = Vec::with_capacity(num_validators);
|
|
|
|
// This is an over-allocation, but it should be fine. It won't be *that* memory hungry and
|
|
|
|
// it's nice to have fast tests.
|
|
|
|
let mut aggregated = Vec::with_capacity(num_validators);
|
|
|
|
|
|
|
|
for (unaggregated_attestations, maybe_signed_aggregate) in attestations.iter() {
|
|
|
|
for (attn, subnet) in unaggregated_attestations {
|
|
|
|
unaggregated.push((attn, Some(*subnet)));
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
|
Batch BLS verification for attestations (#2399)
## Issue Addressed
NA
## Proposed Changes
Adds the ability to verify batches of aggregated/unaggregated attestations from the network.
When the `BeaconProcessor` finds there are messages in the aggregated or unaggregated attestation queues, it will first check the length of the queue:
- `== 1` verify the attestation individually.
- `>= 2` take up to 64 of those attestations and verify them in a batch.
Notably, we only perform batch verification if the queue has a backlog. We don't apply any artificial delays to attestations to try and force them into batches.
### Batching Details
To assist with implementing batches we modify `beacon_chain::attestation_verification` to have two distinct categories for attestations:
- *Indexed* attestations: those which have passed initial validation and were valid enough for us to derive an `IndexedAttestation`.
- *Verified* attestations: those attestations which were indexed *and also* passed signature verification. These are well-formed, interesting messages which were signed by validators.
The batching functions accept `n` attestations and then return `n` attestation verification `Result`s, where those `Result`s can be any combination of `Ok` or `Err`. In other words, we attempt to verify as many attestations as possible and return specific per-attestation results so peer scores can be updated, if required.
When we batch verify attestations, we first try to map all those attestations to *indexed* attestations. If any of those attestations were able to be indexed, we then perform batch BLS verification on those indexed attestations. If the batch verification succeeds, we convert them into *verified* attestations, disabling individual signature checking. If the batch fails, we convert to verified attestations with individual signature checking enabled.
Ultimately, we optimistically try to do a batch verification of attestation signatures and fall back to individual verification if it fails. This opens an attack vector for "poisoning" the attestations and causing us to waste a batch verification. I argue that peer scoring should do a good-enough job of defending against this, and the typical-case gains massively outweigh the worst-case losses.
## Additional Info
Before this PR, attestation verification took the attestations by value (instead of by reference). It turns out that this was unnecessary and, in my opinion, resulted in some undesirable ergonomics (e.g., we had to pass the attestation back in the `Err` variant to avoid clones). In this PR I've modified attestation verification so that it now takes a reference.
I refactored the `beacon_chain/tests/attestation_verification.rs` tests so they use a builder-esque "tester" struct instead of a weird macro. It made it easier for me to test individual/batch with the same set of tests and I think it was a nice tidy-up. Notably, I did this last to try and make sure my new refactors to *actual* production code would pass under the existing test suite.
2021-09-22 08:49:41 +00:00
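As a rough illustration of the queue-drain rule described above (`== 1`: verify individually, `>= 2`: take up to 64 and verify as a batch), the sketch below uses a plain `VecDeque` of dummy attestation IDs rather than the real `BeaconProcessor` queues; `verify_individually` and `verify_batch` are placeholders.
```
use std::collections::VecDeque;

const MAX_BATCH_SIZE: usize = 64;

fn verify_individually(att: u64) {
    println!("individually verifying attestation {att}");
}

fn verify_batch(batch: Vec<u64>) {
    println!("batch verifying {} attestations", batch.len());
}

/// Drain the queue the way the text describes: a single queued attestation is
/// verified on its own, while a backlog is taken in chunks of up to 64.
fn drain_queue(queue: &mut VecDeque<u64>) {
    match queue.len() {
        0 => {}
        1 => verify_individually(queue.pop_front().unwrap()),
        _ => {
            let n = queue.len().min(MAX_BATCH_SIZE);
            let batch: Vec<u64> = queue.drain(..n).collect();
            verify_batch(batch);
        }
    }
}

fn main() {
    let mut queue: VecDeque<u64> = (0..100).collect();
    while !queue.is_empty() {
        drain_queue(&mut queue); // a 64-attestation batch, then a 36-attestation batch
    }
}
```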
|
|
|
if let Some(a) = maybe_signed_aggregate {
|
|
|
|
aggregated.push(a)
|
|
|
|
}
|
|
|
|
}
|
2020-08-26 09:24:55 +00:00
|
|
|
|
|
|
|
for result in self
|
|
|
|
.chain
|
|
|
|
.batch_verify_unaggregated_attestations_for_gossip(unaggregated.into_iter())
|
|
|
|
.unwrap()
|
|
|
|
{
|
|
|
|
let verified = result.unwrap();
|
|
|
|
self.chain.add_to_naive_aggregation_pool(&verified).unwrap();
|
|
|
|
}
|
2020-08-26 09:24:55 +00:00
|
|
|
|
|
|
|
for result in self
|
|
|
|
.chain
|
|
|
|
.batch_verify_aggregated_attestations_for_gossip(aggregated.into_iter())
|
|
|
|
.unwrap()
|
|
|
|
{
|
|
|
|
let verified = result.unwrap();
|
|
|
|
self.chain
|
|
|
|
.apply_attestation_to_fork_choice(&verified)
|
|
|
|
.unwrap();
|
Refactor op pool for speed and correctness (#3312)
## Proposed Changes
This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12).
Attestation packing is sped up by removing several inefficiencies:
- No more recalculation of `attesting_indices` during packing.
- No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices. See `RewardCache`.
- No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra).
- No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release).
So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms.
Verification of attester slashings, proposer slashings and voluntary exits is fixed by:
- Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message.
- Storing the `SigVerifiedOp` on disk rather than the raw operation. This allows us to continue tracking the fork versions after a reboot.
This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f.
## Additional Info
The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, by the logic that attestations are most valuable in the few slots after they're observed, and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s.
This PR contains a runtime killswitch `--paranoid-block-proposal` which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct, this flag could be useful in the event of an unforeseen emergency.
Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised.
2022-08-29 09:10:26 +00:00
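The fork-version tracking can be pictured with the hedged sketch below. The types are simplified stand-ins rather than the real `SigVerifiedOp` definition; the point is only that the wrapper remembers the `ForkVersion` used at verification time so the op pool can cheaply compare it against the head state's view before including the message.
```
/// Hypothetical, trimmed-down stand-ins for the real types.
type ForkVersion = [u8; 4];

struct VoluntaryExit {
    validator_index: u64,
}

/// An operation whose signature has been verified once, stored together with
/// the fork version that was in force when it was verified.
struct SigVerifiedOp<T> {
    op: T,
    verified_against: ForkVersion,
}

impl<T> SigVerifiedOp<T> {
    /// Cheap re-validation at block-production time: the signature itself is not
    /// re-checked, we only confirm the fork version still matches the head's view.
    fn still_valid(&self, head_fork_version: ForkVersion) -> bool {
        self.verified_against == head_fork_version
    }
}

fn main() {
    let exit = SigVerifiedOp {
        op: VoluntaryExit { validator_index: 42 },
        verified_against: [2, 0, 0, 0],
    };
    // Same fork version as at verification time: safe to include without re-verifying.
    assert!(exit.still_valid([2, 0, 0, 0]));
    // A later fork changed the version: drop the op or re-verify from scratch.
    assert!(!exit.still_valid([3, 0, 0, 0]));
    println!("kept exit for validator {}", exit.op.validator_index);
}
```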
|
|
|
self.chain.add_to_block_inclusion_pool(verified).unwrap();
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Implement database temp states to reduce memory usage (#1798)
## Issue Addressed
Closes #800
Closes #1713
## Proposed Changes
Implement the temporary state storage algorithm described in #800. Specifically:
* Add `DBColumn::BeaconStateTemporary`, for storing 0-length temporary marker values.
* Store intermediate states immediately as they are created, marked temporary. Delete the temporary flag if the block is processed successfully.
* Add a garbage collection process to delete leftover temporary states on start-up.
* Bump the database schema version to 2 so that a DB with temporary states can't accidentally be used with older versions of the software. The auto-migration is a no-op, but puts in place some infra that we can use for future migrations (e.g. #1784)
## Additional Info
There are two known race conditions, one potentially causing permanent faults (hopefully rare), and the other insignificant.
### Race 1: Permanent state marked temporary
EDIT: this has been fixed by the addition of a lock around the relevant critical section
There are 2 threads that are trying to store 2 different blocks that share some intermediate states (e.g. they both skip some slots from the current head). Consider this sequence of events:
1. Thread 1 checks if state `s` already exists, and seeing that it doesn't, prepares an atomic commit of `(s, s_temporary_flag)`.
2. Thread 2 does the same, but also gets as far as committing the state txn, finishing the processing of its block, and _deleting_ the temporary flag.
3. Thread 1 is (finally) scheduled again, and marks `s` as temporary with its transaction.
4. One of two things happens:
   a) The process is killed, or thread 1's block fails verification and the temp flag is not deleted. This is a permanent failure! Any attempt to load state `s` will fail... hope it isn't on the main chain! Alternatively (4b) happens...
   b) Thread 1 finishes, and re-deletes the temporary flag. In this case the failure is transient, state `s` will disappear temporarily, but will come back once thread 1 finishes running.
I _hope_ that steps 1-3 only happen very rarely, and 4a even more rarely. It's hard to know.
This once again begs the question of why we're using LevelDB (#483), when it clearly doesn't care about atomicity! A ham-fisted fix would be to wrap the hot and cold DBs in locks, which would bring us closer to how other DBs handle read-write transactions. E.g. [LMDB only allows one R/W transaction at a time](https://docs.rs/lmdb/0.8.0/lmdb/struct.Environment.html#method.begin_rw_txn).
### Race 2: Temporary state returned from `get_state`
I don't think this race really matters, but in `load_hot_state`, if another thread stores a state between when we call `load_state_temporary_flag` and when we call `load_hot_state_summary`, then we could end up returning that state even though it's only a temporary state. I can't think of any case where this would be relevant, and I suspect if it did come up, it would be safe/recoverable (having data is safer than _not_ having data).
This could be fixed by using a LevelDB read snapshot, but that would require substantial changes to how we read all our values, so I don't think it's worth it right now.
2020-10-23 01:27:51 +00:00
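A minimal sketch of the temporary-state algorithm described above, assuming an in-memory map in place of the hot database (the method names here are illustrative, not the real store API): states are written immediately with a temporary marker, the marker is cleared once the block is processed successfully, and leftover markers are garbage-collected on start-up.
```
use std::collections::{HashMap, HashSet};

/// In-memory stand-in for the hot database: state roots -> states, plus a
/// separate set of 0-length temporary markers.
#[derive(Default)]
struct Store {
    states: HashMap<u64, String>,
    temporary: HashSet<u64>,
}

impl Store {
    /// Store the intermediate state immediately and mark it temporary (the real
    /// store does both writes in one atomic commit).
    fn put_temporary_state(&mut self, root: u64, state: String) {
        self.states.insert(root, state);
        self.temporary.insert(root);
    }

    /// Once the block that produced these states is fully processed, clear the
    /// temporary flags so the states become permanent.
    fn finalize_states(&mut self, roots: &[u64]) {
        for root in roots {
            self.temporary.remove(root);
        }
    }

    /// On start-up, garbage-collect any states that were left marked temporary
    /// by a crash or a failed block.
    fn garbage_collect(&mut self) {
        for root in self.temporary.drain() {
            self.states.remove(&root);
        }
    }
}

fn main() {
    let mut store = Store::default();
    store.put_temporary_state(1, "state at skipped slot".into());
    store.put_temporary_state(2, "state at block slot".into());

    // Block processing succeeds for state 2 only; state 1's block failed.
    store.finalize_states(&[2]);

    // Simulated restart: the leftover temporary state is deleted.
    store.garbage_collect();
    assert!(store.states.contains_key(&2));
    assert!(!store.states.contains_key(&1));
}
```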
|
|
|
pub fn set_current_slot(&self, slot: Slot) {
|
2020-08-26 09:24:55 +00:00
|
|
|
let current_slot = self.chain.slot().unwrap();
|
|
|
|
let current_epoch = current_slot.epoch(E::slots_per_epoch());
|
|
|
|
let epoch = slot.epoch(E::slots_per_epoch());
|
|
|
|
assert!(
|
|
|
|
epoch >= current_epoch,
|
|
|
|
"Jumping backwards to an earlier epoch isn't well defined. \
|
|
|
|
Please generate test blocks epoch-by-epoch instead."
|
|
|
|
);
|
|
|
|
self.chain.slot_clock.set_slot(slot.into());
|
|
|
|
}
|
|
|
|
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than those of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
  - I had to do this to deal with sending blocks into spawned tasks.
  - Previously we were cloning the beacon block at least 2 times during each block processing; these clones are either removed or turned into cheaper `Arc` clones.
  - We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
  - It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might use in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy, I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
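A small sketch of why wrapping the snapshot in an `Arc` helps, using trimmed-down stand-ins for the real structs: a reader clones the `Arc` out of `cached_head` and drops that lock immediately, so it can later take `fork_choice` without ever holding both locks at once.
```
use std::sync::{Arc, RwLock};

/// Hypothetical, trimmed-down versions of the real structs.
struct Snapshot {
    head_slot: u64,
}

struct ForkChoice {
    justified_epoch: u64,
}

struct CanonicalHead {
    cached_head: RwLock<Arc<Snapshot>>,
    fork_choice: RwLock<ForkChoice>,
}

impl CanonicalHead {
    /// Clone the `Arc` and release the lock straight away; callers can then read
    /// the snapshot for as long as they like without holding `cached_head`.
    fn cached_head(&self) -> Arc<Snapshot> {
        self.cached_head.read().unwrap().clone()
    }
}

fn main() {
    let head = CanonicalHead {
        cached_head: RwLock::new(Arc::new(Snapshot { head_slot: 100 })),
        fork_choice: RwLock::new(ForkChoice { justified_epoch: 3 }),
    };

    // The snapshot handle outlives the short read-lock used to clone it, so
    // taking `fork_choice` afterwards never overlaps with holding `cached_head`.
    let snapshot = head.cached_head();
    let justified = head.fork_choice.read().unwrap().justified_epoch;
    println!("head slot {}, justified epoch {}", snapshot.head_slot, justified);
}
```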
|
|
|
pub async fn add_block_at_slot(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
slot: Slot,
|
|
|
|
state: BeaconState<E>,
|
2020-10-19 05:58:39 +00:00
|
|
|
) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock<E>, BeaconState<E>), BlockError<E>> {
|
2020-08-26 09:24:55 +00:00
|
|
|
self.set_current_slot(slot);
|
|
|
|
let (block, new_state) = self.make_block(state, slot).await;
|
2022-09-23 03:52:42 +00:00
|
|
|
let block_hash = self
|
|
|
|
.process_block(slot, block.canonical_root(), block.clone())
|
|
|
|
.await?;
|
2020-10-19 05:58:39 +00:00
|
|
|
Ok((block_hash, block, new_state))
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn attest_block(
|
|
|
|
&self,
|
|
|
|
state: &BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
block_hash: SignedBeaconBlockHash,
|
|
|
|
block: &SignedBeaconBlock<E>,
|
|
|
|
validators: &[usize],
|
|
|
|
) {
|
2021-07-09 06:15:32 +00:00
|
|
|
let attestations =
|
2021-07-30 01:11:47 +00:00
|
|
|
self.make_attestations(validators, state, state_root, block_hash, block.slot());
|
2020-08-26 09:24:55 +00:00
|
|
|
self.process_attestations(attestations);
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn add_attested_block_at_slot(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
slot: Slot,
|
|
|
|
state: BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
validators: &[usize],
|
2020-10-19 05:58:39 +00:00
|
|
|
) -> Result<(SignedBeaconBlockHash, BeaconState<E>), BlockError<E>> {
|
|
|
|
let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?;
|
2021-03-17 05:09:57 +00:00
|
|
|
self.attest_block(&state, state_root, block_hash, &block, validators);
|
2020-10-19 05:58:39 +00:00
|
|
|
Ok((block_hash, state))
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn add_attested_blocks_at_slots(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
state: BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
slots: &[Slot],
|
|
|
|
validators: &[usize],
|
|
|
|
) -> AddBlocksResult<E> {
|
|
|
|
assert!(!slots.is_empty());
|
2021-03-17 05:09:57 +00:00
|
|
|
self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None)
|
|
|
|
.await
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at `start_slot(n)`, which would be the state from (1) "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
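A small, self-contained sketch of the difference (placeholder types only, not the beacon API): when `start_slot(n)` is skipped, (1) is the state root stored in the finalized block, while (2) is that state advanced through the skip slots, so the two roots generally differ.
```
// Placeholder "roots" to make the two candidates concrete.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Root(u64);

struct FinalizedBlock {
    state_root: Root,
}

// (1) The state root recorded in the finalized block itself (cheap; matches Teku).
fn option_1(finalized_block: &FinalizedBlock) -> Root {
    finalized_block.state_root
}

// (2) The state root at `start_slot(n)`: the state from (1) after processing the
// skipped slots, so it usually differs from (1) when `start_slot(n)` is empty.
fn option_2(state_roots_by_slot: &[Root], start_slot_of_n: usize) -> Root {
    state_roots_by_slot[start_slot_of_n]
}

fn main() {
    let finalized_block = FinalizedBlock { state_root: Root(1) };
    let state_roots_by_slot = [Root(1), Root(2)]; // slot 0: finalized block, slot 1: skipped forward
    assert_eq!(option_1(&finalized_block), Root(1));
    assert_eq!(option_2(&state_roots_by_slot, 1), Root(2));
}
```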
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way, and I don't think we were making any promises about SSE event ordering, so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy, I've just used the `slot` that was saved in the `fork_choice_store`. That seems like a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue; if anything, I think it'll be better, since the genesis alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Converting tests to async `tokio` tests (see the sketch below these notes).
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We now generate it in each test; this is less efficient but hopefully insignificant.
I had to disable the `rayon` concurrent tests in the `fork_choice` tests because combining `rayon` with `block_on` was causing a panic.
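For reference, a minimal sketch of the test-conversion pattern (assuming the `tokio` test macro; the async function is a stand-in, not a Lighthouse harness call):
```
// A stand-in for an async harness call such as block production.
async fn produce_block_slot() -> u64 {
    42
}

// The old synchronous test body becomes an async body, and `.await` replaces
// any `block_on` wrappers.
#[tokio::test]
async fn produces_a_block() {
    let slot = produce_block_slot().await;
    assert_eq!(slot, 42);
}
```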
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
async fn add_attested_blocks_at_slots_given_lbh(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
mut state: BeaconState<E>,
|
2021-03-17 05:09:57 +00:00
|
|
|
state_root: Hash256,
|
2020-08-26 09:24:55 +00:00
|
|
|
slots: &[Slot],
|
|
|
|
validators: &[usize],
|
|
|
|
mut latest_block_hash: Option<SignedBeaconBlockHash>,
|
|
|
|
) -> AddBlocksResult<E> {
|
|
|
|
assert!(
|
|
|
|
slots.windows(2).all(|w| w[0] <= w[1]),
|
|
|
|
"Slots have to be sorted"
|
|
|
|
); // slice.is_sorted() isn't stabilized at the moment of writing this
|
|
|
|
let mut block_hash_from_slot: HashMap<Slot, SignedBeaconBlockHash> = HashMap::new();
|
|
|
|
let mut state_hash_from_slot: HashMap<Slot, BeaconStateHash> = HashMap::new();
|
|
|
|
for slot in slots {
|
2020-10-19 05:58:39 +00:00
|
|
|
let (block_hash, new_state) = self
|
2021-03-17 05:09:57 +00:00
|
|
|
.add_attested_block_at_slot(*slot, state, state_root, validators)
|
2022-07-03 05:36:50 +00:00
|
|
|
.await
|
2020-10-19 05:58:39 +00:00
|
|
|
.unwrap();
|
2020-08-26 09:24:55 +00:00
|
|
|
state = new_state;
|
|
|
|
block_hash_from_slot.insert(*slot, block_hash);
|
|
|
|
state_hash_from_slot.insert(*slot, state.tree_hash_root().into());
|
|
|
|
latest_block_hash = Some(block_hash);
|
|
|
|
}
|
|
|
|
(
|
|
|
|
block_hash_from_slot,
|
|
|
|
state_hash_from_slot,
|
|
|
|
latest_block_hash.unwrap(),
|
|
|
|
state,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A monstrosity of great usefulness.
|
|
|
|
///
|
|
|
|
/// Calls `add_attested_blocks_at_slots` for each of the chains in `chains`,
|
|
|
|
/// taking care to batch blocks by epoch so that the slot clock gets advanced one
|
|
|
|
/// epoch at a time.
|
|
|
|
///
|
|
|
|
/// Chains is a vec of `(state, slots, validators)` tuples.
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn add_blocks_on_multiple_chains(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
chains: Vec<(BeaconState<E>, Vec<Slot>, Vec<usize>)>,
|
|
|
|
) -> Vec<AddBlocksResult<E>> {
|
|
|
|
let slots_per_epoch = E::slots_per_epoch();
|
|
|
|
|
|
|
|
let min_epoch = chains
|
|
|
|
.iter()
|
|
|
|
.map(|(_, slots, _)| slots.iter().min().unwrap())
|
|
|
|
.min()
|
|
|
|
.unwrap()
|
|
|
|
.epoch(slots_per_epoch);
|
|
|
|
let max_epoch = chains
|
|
|
|
.iter()
|
|
|
|
.map(|(_, slots, _)| slots.iter().max().unwrap())
|
|
|
|
.max()
|
|
|
|
.unwrap()
|
|
|
|
.epoch(slots_per_epoch);
|
|
|
|
|
|
|
|
let mut chains = chains
|
|
|
|
.into_iter()
|
|
|
|
.map(|(state, slots, validators)| {
|
|
|
|
(
|
|
|
|
state,
|
|
|
|
slots,
|
|
|
|
validators,
|
|
|
|
HashMap::new(),
|
|
|
|
HashMap::new(),
|
|
|
|
SignedBeaconBlockHash::from(Hash256::zero()),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
for epoch in min_epoch.as_u64()..=max_epoch.as_u64() {
|
|
|
|
let mut new_chains = vec![];
|
|
|
|
|
2021-03-17 05:09:57 +00:00
|
|
|
for (
|
|
|
|
mut head_state,
|
|
|
|
slots,
|
|
|
|
validators,
|
|
|
|
mut block_hashes,
|
|
|
|
mut state_hashes,
|
|
|
|
head_block,
|
|
|
|
) in chains
|
2020-08-26 09:24:55 +00:00
|
|
|
{
|
|
|
|
let epoch_slots = slots
|
|
|
|
.iter()
|
|
|
|
.filter(|s| s.epoch(slots_per_epoch).as_u64() == epoch)
|
|
|
|
.copied()
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
2021-03-17 05:09:57 +00:00
|
|
|
let head_state_root = head_state.update_tree_hash_cache().unwrap();
|
2020-08-26 09:24:55 +00:00
|
|
|
let (new_block_hashes, new_state_hashes, new_head_block, new_head_state) = self
|
|
|
|
.add_attested_blocks_at_slots_given_lbh(
|
|
|
|
head_state,
|
2021-03-17 05:09:57 +00:00
|
|
|
head_state_root,
|
2020-08-26 09:24:55 +00:00
|
|
|
&epoch_slots,
|
|
|
|
&validators,
|
|
|
|
Some(head_block),
|
2022-07-03 05:36:50 +00:00
|
|
|
)
|
|
|
|
.await;
|
2020-08-26 09:24:55 +00:00
|
|
|
|
|
|
|
block_hashes.extend(new_block_hashes);
|
|
|
|
state_hashes.extend(new_state_hashes);
|
|
|
|
|
|
|
|
new_chains.push((
|
|
|
|
new_head_state,
|
|
|
|
slots,
|
|
|
|
validators,
|
|
|
|
block_hashes,
|
|
|
|
state_hashes,
|
|
|
|
new_head_block,
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
chains = new_chains;
|
|
|
|
}
|
|
|
|
|
|
|
|
chains
|
|
|
|
.into_iter()
|
|
|
|
.map(|(state, _, _, block_hashes, state_hashes, head_block)| {
|
|
|
|
(block_hashes, state_hashes, head_block, state)
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
}
|
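To make the epoch-batching in `add_blocks_on_multiple_chains` concrete, here is a small, self-contained sketch of the slot-partitioning step; plain integers stand in for `Slot`/`Epoch` and `SLOTS_PER_EPOCH` is an assumed constant, so this is not the harness code itself.
```
// Each chain only contributes the slots belonging to the current epoch, so the
// shared slot clock only ever needs to advance one epoch at a time.
const SLOTS_PER_EPOCH: u64 = 32;

fn epoch_slots(slots: &[u64], epoch: u64) -> Vec<u64> {
    slots
        .iter()
        .copied()
        .filter(|slot| *slot / SLOTS_PER_EPOCH == epoch)
        .collect()
}

fn main() {
    let chain_a = [1, 2, 33, 34];
    let chain_b = [31, 32, 65];
    for epoch in 0..=2 {
        println!(
            "epoch {epoch}: chain A adds {:?}, chain B adds {:?}",
            epoch_slots(&chain_a, epoch),
            epoch_slots(&chain_b, epoch),
        );
    }
}
```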
|
|
|
|
|
|
|
pub fn get_finalized_checkpoints(&self) -> HashSet<SignedBeaconBlockHash> {
|
|
|
|
let chain_dump = self.chain.chain_dump().unwrap();
|
|
|
|
chain_dump
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
2021-07-09 06:15:32 +00:00
|
|
|
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
|
2020-08-26 09:24:55 +00:00
|
|
|
.filter(|block_hash| *block_hash != Hash256::zero().into())
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots()
|
|
|
|
/// instead
|
|
|
|
///
|
|
|
|
/// Advance the slot of the `BeaconChain`.
|
|
|
|
///
|
|
|
|
/// Does not produce blocks or attestations.
|
|
|
|
pub fn advance_slot(&self) {
|
|
|
|
self.chain.slot_clock.advance_slot();
|
|
|
|
}
|
|
|
|
|
2022-12-13 09:57:26 +00:00
|
|
|
/// Advance the clock to `lookahead` before the start of `slot`.
|
|
|
|
pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) {
|
|
|
|
let time = self.chain.slot_clock.start_of(slot).unwrap() - lookahead;
|
|
|
|
self.chain.slot_clock.set_current_time(time);
|
|
|
|
}
|
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
/// Deprecated: Use make_block() instead
|
|
|
|
///
|
|
|
|
/// Returns a newly created block, signed by the proposer for the given slot.
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn build_block(
|
2020-10-19 05:58:39 +00:00
|
|
|
&self,
|
2020-08-26 09:24:55 +00:00
|
|
|
state: BeaconState<E>,
|
|
|
|
slot: Slot,
|
|
|
|
_block_strategy: BlockStrategy,
|
|
|
|
) -> (SignedBeaconBlock<E>, BeaconState<E>) {
|
2022-07-03 05:36:50 +00:00
|
|
|
self.make_block(state, slot).await
|
2020-08-26 09:24:55 +00:00
|
|
|
}
|
|
|
|
|
2021-10-02 01:39:11 +00:00
|
|
|
/// Uses `Self::extend_chain` to build the chain out to the `target_slot`.
|
2022-07-03 05:36:50 +00:00
|
|
|
pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 {
|
|
|
|
if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() {
|
2021-10-02 01:39:11 +00:00
|
|
|
self.advance_slot();
|
|
|
|
}
|
|
|
|
|
|
|
|
let num_slots = target_slot
|
|
|
|
.as_usize()
|
|
|
|
.checked_sub(self.chain.slot().unwrap().as_usize())
|
|
|
|
.expect("target_slot must be >= current_slot")
|
|
|
|
.checked_add(1)
|
|
|
|
.unwrap();
|
|
|
|
|
2022-07-03 05:36:50 +00:00
|
|
|
self.extend_slots(num_slots).await
|
2021-10-02 01:39:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Uses `Self::extend_chain` to add `num_slots` blocks.
|
|
|
|
///
|
|
|
|
/// Utilizes:
|
|
|
|
///
|
|
|
|
/// - BlockStrategy::OnCanonicalHead,
|
|
|
|
/// - AttestationStrategy::AllValidators,
|
2022-07-03 05:36:50 +00:00
|
|
|
    pub async fn extend_slots(&self, num_slots: usize) -> Hash256 {
        // If the harness clock is still at the head's slot, advance one slot so that new
        // blocks can be produced on top of the current head.
        if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() {
            self.advance_slot();
        }

        self.extend_chain(
            num_slots,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await
    }

    /// Deprecated: Use `add_attested_blocks_at_slots()` instead.
    ///
    /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the
    /// last-produced block (the head of the chain).
    ///
    /// The chain will be extended by `num_blocks` blocks.
    ///
    /// The `block_strategy` dictates where the new blocks will be placed.
    ///
    /// The `attestation_strategy` dictates which validators will attest to the newly created
    /// blocks.
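    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the original docs) showing both block strategies; it
    /// assumes an existing `harness: BeaconChainHarness<_>` whose clock has been advanced past
    /// the current head slot:
    ///
    /// ```ignore
    /// // Build two blocks on the canonical head, attested by every validator.
    /// let head_root = harness
    ///     .extend_chain(
    ///         2,
    ///         BlockStrategy::OnCanonicalHead,
    ///         AttestationStrategy::AllValidators,
    ///     )
    ///     .await;
    ///
    /// // Fork from the state at slot 8, building the first new block at slot 10
    /// // (i.e. skipping slot 9), attested only by validators 0..4.
    /// let fork_root = harness
    ///     .extend_chain(
    ///         2,
    ///         BlockStrategy::ForkCanonicalChainAt {
    ///             previous_slot: Slot::new(8),
    ///             first_slot: Slot::new(10),
    ///         },
    ///         AttestationStrategy::SomeValidators(vec![0, 1, 2, 3]),
    ///     )
    ///     .await;
    /// ```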
    pub async fn extend_chain(
        &self,
        num_blocks: usize,
        block_strategy: BlockStrategy,
        attestation_strategy: AttestationStrategy,
    ) -> Hash256 {
        // Determine the starting state and the slots at which blocks will be produced.
        let (mut state, slots) = match block_strategy {
            BlockStrategy::OnCanonicalHead => {
                let current_slot: u64 = self.get_current_slot().into();
                let slots: Vec<Slot> = (current_slot..(current_slot + (num_blocks as u64)))
                    .map(Slot::new)
                    .collect();
                let state = self.get_current_state();
                (state, slots)
            }
            BlockStrategy::ForkCanonicalChainAt {
                previous_slot,
                first_slot,
            } => {
                let first_slot_: u64 = first_slot.into();
                let slots: Vec<Slot> = (first_slot_..(first_slot_ + (num_blocks as u64)))
                    .map(Slot::new)
                    .collect();
                let state = self
                    .chain
                    .state_at_slot(previous_slot, StateSkipConfig::WithStateRoots)
                    .unwrap();
                (state, slots)
            }
        };
        // Resolve which validators should attest to the new blocks.
        let validators = match attestation_strategy {
            AttestationStrategy::AllValidators => self.get_all_validators(),
            AttestationStrategy::SomeValidators(vals) => vals,
        };
        let state_root = state.update_tree_hash_cache().unwrap();
        let (_, _, last_produced_block_hash, _) = self
            .add_attested_blocks_at_slots(state, state_root, &slots, &validators)
            .await;

        last_produced_block_hash.into()
    }

    /// Deprecated: Use `add_attested_blocks_at_slots()` instead.
    ///
    /// Creates two forks:
    ///
    /// - The "honest" fork: created by the `honest_validators`, who have built `honest_fork_blocks`
    ///   on the head.
    /// - The "faulty" fork: created by the `faulty_validators`, who skipped a slot and
    ///   then built `faulty_fork_blocks`.
    ///
    /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain.
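    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the original docs); it assumes a harness built with 32
    /// validators, split evenly between the two forks:
    ///
    /// ```ignore
    /// let all_validators: Vec<usize> = (0..32).collect();
    /// let (honest, faulty) = all_validators.split_at(16);
    ///
    /// let (honest_head, faulty_head) = harness
    ///     .generate_two_forks_by_skipping_a_block(honest, faulty, 8, 8)
    ///     .await;
    /// ```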
    pub async fn generate_two_forks_by_skipping_a_block(
        &self,
        honest_validators: &[usize],
        faulty_validators: &[usize],
        honest_fork_blocks: usize,
        faulty_fork_blocks: usize,
    ) -> (Hash256, Hash256) {
        let initial_head_slot = self.chain.head_snapshot().beacon_block.slot();

        // Move to the next slot so we may produce some more blocks on the head.
        self.advance_slot();

        // Extend the chain with blocks where only honest validators agree.
        let honest_head = self
            .extend_chain(
                honest_fork_blocks,
                BlockStrategy::OnCanonicalHead,
                AttestationStrategy::SomeValidators(honest_validators.to_vec()),
            )
            .await;

        // Go back to the last block where all agreed, and build blocks upon it where only faulty
        // nodes agree.
        let faulty_head = self
            .extend_chain(
                faulty_fork_blocks,
                BlockStrategy::ForkCanonicalChainAt {
                    previous_slot: initial_head_slot,
                    // `initial_head_slot + 2` means one slot is skipped.
                    first_slot: initial_head_slot + 2,
                },
                AttestationStrategy::SomeValidators(faulty_validators.to_vec()),
            )
            .await;

        assert_ne!(honest_head, faulty_head, "forks should be distinct");

        (honest_head, faulty_head)
    }
}

// Junk `Debug` impl to satisfy certain trait bounds during testing.
impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BeaconChainHarness")
    }
}