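//! The syncing simulator.
//!
//! Starts a speeded-up local testnet with a single beacon node and a validator client, then adds
//! further beacon nodes after a configurable delay and checks that they sync and reach the same
//! finalized epoch as the bootnode.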

use crate::checks::{epoch_delay, verify_all_finalized_at};
use crate::local_network::LocalNetwork;
use clap::ArgMatches;
use futures::prelude::*;
use node_test_rig::{
    environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorFiles,
};
use node_test_rig::{testing_validator_config, ClientConfig};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use types::{Epoch, EthSpec};
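
/// Parses the syncing-simulator CLI arguments and runs the simulation.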
pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> {
    let initial_delay = value_t!(matches, "initial_delay", u64).unwrap();
    let sync_timeout = value_t!(matches, "sync_timeout", u64).unwrap();
    let speed_up_factor = value_t!(matches, "speedup", u64).unwrap();
    let strategy = value_t!(matches, "strategy", String).unwrap();

    println!("Syncing Simulator:");
    println!(" initial_delay: {}", initial_delay);
    println!(" sync timeout: {}", sync_timeout);
    println!(" speed up factor: {}", speed_up_factor);
    println!(" strategy: {}", strategy);

    let log_level = "debug";
    let log_format = None;

    syncing_sim(
        speed_up_factor,
        initial_delay,
        sync_timeout,
        strategy,
        log_level,
        log_format,
    )
}
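
/// Builds the testing environment, starts a `LocalNetwork` with one beacon node and a validator
/// client, then runs the requested syncing strategy against it.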
fn syncing_sim(
    speed_up_factor: u64,
    initial_delay: u64,
    sync_timeout: u64,
    strategy: String,
    log_level: &str,
    log_format: Option<&str>,
) -> Result<(), String> {
    let mut env = EnvironmentBuilder::minimal()
        .async_logger(log_level, log_format)?
        .multi_threaded_tokio_runtime()?
        .build()?;

    let spec = &mut env.eth2_config.spec;
    let end_after_checks = true;
    let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor);
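
    // Scale the timing parameters down by `speed_up_factor` so the simulation completes quickly.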
    spec.milliseconds_per_slot /= speed_up_factor;
    // Currently, Lighthouse only supports slot lengths that are a whole number of seconds.
    spec.milliseconds_per_slot = max(1000, spec.milliseconds_per_slot / 1000 * 1000);
    spec.eth1_follow_distance = 16;
    spec.genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2;
    spec.min_genesis_time = 0;
    spec.min_genesis_active_validator_count = 64;
    spec.seconds_per_eth1_block = 1;

    let num_validators = 8;
    let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
    let context = env.core_context();
    let mut beacon_config = testing_client_config();
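
    // Schedule genesis a few seconds in the future and use the interop genesis state for
    // `num_validators` validators.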
    let genesis_time = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|_| "should get system time")?
        + Duration::from_secs(5);
    beacon_config.genesis = ClientGenesis::Interop {
        validator_count: num_validators,
        genesis_time: genesis_time.as_secs(),
    };
    beacon_config.dummy_eth1_backend = true;
    beacon_config.sync_eth1_chain = true;

    beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));

    // Generate the directories and keystores required for the validator clients.
    let validator_indices = (0..num_validators).collect::<Vec<_>>();
    let validator_files = ValidatorFiles::with_keystores(&validator_indices).unwrap();

    let main_future = async {
        /*
         * Create a new `LocalNetwork` with one beacon node.
         */
        let network = LocalNetwork::new(context, beacon_config.clone()).await?;

        /*
         * Add a validator client which handles all validators from the genesis state.
         */
        network
            .add_validator_client(testing_validator_config(), 0, validator_files, true)
            .await?;

        // Run the requested syncing strategy; the "all" strategy runs every check one after the
        // other.
        pick_strategy(
            &strategy,
            network.clone(),
            beacon_config.clone(),
            slot_duration,
            initial_delay,
            sync_timeout,
        )
        .await?;

        // Either continue immediately or wait forever, depending on the value of
        // `end_after_checks`.
        if !end_after_checks {
            future::pending::<()>().await;
        }

        /*
         * End the simulation by dropping the network. This will kill all running beacon nodes and
         * validator clients.
         */
        println!(
            "Simulation complete. Finished with {} beacon nodes and {} validator clients",
            network.beacon_node_count(),
            network.validator_client_count()
        );

        // Be explicit about dropping the network, as this kills all the nodes. This ensures
        // all the checks have adequate time to pass.
        drop(network);
        Ok::<(), String>(())
    };

    env.runtime()
        .block_on(tokio_compat_02::FutureExt::compat(main_future))
        .unwrap();

    env.fire_signal();
    env.shutdown_on_idle();

    Ok(())
}
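
/// Runs the syncing check that corresponds to the chosen `strategy` string.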
pub async fn pick_strategy<E: EthSpec>(
    strategy: &str,
    network: LocalNetwork<E>,
    beacon_config: ClientConfig,
    slot_duration: Duration,
    initial_delay: u64,
    sync_timeout: u64,
) -> Result<(), String> {
    match strategy {
        "one-node" => {
            verify_one_node_sync(
                network,
                beacon_config,
                slot_duration,
                initial_delay,
                sync_timeout,
            )
            .await
        }
        "two-nodes" => {
            verify_two_nodes_sync(
                network,
                beacon_config,
                slot_duration,
                initial_delay,
                sync_timeout,
            )
            .await
        }
        "mixed" => {
            verify_in_between_sync(
                network,
                beacon_config,
                slot_duration,
                initial_delay,
                sync_timeout,
            )
            .await
        }
        "all" => {
            verify_syncing(
                network,
                beacon_config,
                slot_duration,
                initial_delay,
                sync_timeout,
            )
            .await
        }
        _ => Err("Invalid strategy".into()),
    }
}

/// Verify one node added after `initial_delay` epochs is in sync
/// after `sync_timeout` epochs.
pub async fn verify_one_node_sync<E: EthSpec>(
    network: LocalNetwork<E>,
    beacon_config: ClientConfig,
    slot_duration: Duration,
    initial_delay: u64,
    sync_timeout: u64,
) -> Result<(), String> {
    let epoch_duration = slot_duration * (E::slots_per_epoch() as u32);
    let network_c = network.clone();
    // Delay for `initial_delay` epochs before adding another node to start syncing.
    epoch_delay(
        Epoch::new(initial_delay),
        slot_duration,
        E::slots_per_epoch(),
    )
    .await;
    // Add a beacon node.
    network.add_beacon_node(beacon_config).await?;
    // Check every `epoch_duration` if the nodes are synced,
    // limited to at most `sync_timeout` epochs.
    let mut interval = tokio::time::interval(epoch_duration);
    let mut count = 0;
    while interval.next().await.is_some() {
        if count >= sync_timeout || !check_still_syncing(&network_c).await? {
            break;
        }
        count += 1;
    }
    let epoch = network.bootnode_epoch().await?;
    verify_all_finalized_at(network, epoch)
        .map_err(|e| format!("One node sync error: {}", e))
        .await
}

/// Verify two nodes added after `initial_delay` epochs are in sync
/// after `sync_timeout` epochs.
pub async fn verify_two_nodes_sync<E: EthSpec>(
    network: LocalNetwork<E>,
    beacon_config: ClientConfig,
    slot_duration: Duration,
    initial_delay: u64,
    sync_timeout: u64,
) -> Result<(), String> {
    let epoch_duration = slot_duration * (E::slots_per_epoch() as u32);
    let network_c = network.clone();
    // Delay for `initial_delay` epochs before adding the nodes that will start syncing.
    epoch_delay(
        Epoch::new(initial_delay),
        slot_duration,
        E::slots_per_epoch(),
    )
    .await;
    // Add two beacon nodes.
    network.add_beacon_node(beacon_config.clone()).await?;
    network.add_beacon_node(beacon_config).await?;
    // Check every `epoch_duration` if the nodes are synced,
    // limited to at most `sync_timeout` epochs.
    let mut interval = tokio::time::interval(epoch_duration);
    let mut count = 0;
    while interval.next().await.is_some() {
        if count >= sync_timeout || !check_still_syncing(&network_c).await? {
            break;
        }
        count += 1;
    }
    let epoch = network.bootnode_epoch().await?;
    verify_all_finalized_at(network, epoch)
        .map_err(|e| format!("Two nodes sync error: {}", e))
        .await
}

/// Add two syncing nodes after `initial_delay` epochs, add another node after
/// `sync_timeout - 5` epochs and verify that all are in sync after `sync_timeout + 5` epochs.
pub async fn verify_in_between_sync<E: EthSpec>(
    network: LocalNetwork<E>,
    beacon_config: ClientConfig,
    slot_duration: Duration,
    initial_delay: u64,
    sync_timeout: u64,
) -> Result<(), String> {
    let epoch_duration = slot_duration * (E::slots_per_epoch() as u32);
    let network_c = network.clone();
    // Delay for `initial_delay` epochs before adding the first syncing nodes.
    let config1 = beacon_config.clone();
    epoch_delay(
        Epoch::new(initial_delay),
        slot_duration,
        E::slots_per_epoch(),
    )
    .await;
    // Add two beacon nodes.
    network.add_beacon_node(beacon_config.clone()).await?;
    network.add_beacon_node(beacon_config).await?;
    // Delay before adding the additional syncing node.
    epoch_delay(
        Epoch::new(sync_timeout - 5),
        slot_duration,
        E::slots_per_epoch(),
    )
    .await;
    // Add a beacon node.
    network.add_beacon_node(config1.clone()).await?;
    // Check every `epoch_duration` if the nodes are synced,
    // limited to at most `sync_timeout` epochs.
    let mut interval = tokio::time::interval(epoch_duration);
    let mut count = 0;
    while interval.next().await.is_some() {
        if count >= sync_timeout || !check_still_syncing(&network_c).await? {
            break;
        }
        count += 1;
    }
    let epoch = network.bootnode_epoch().await?;
    verify_all_finalized_at(network, epoch)
        .map_err(|e| format!("In-between sync error: {}", e))
        .await
}

/// Run all syncing strategies one after the other.
pub async fn verify_syncing<E: EthSpec>(
    network: LocalNetwork<E>,
    beacon_config: ClientConfig,
    slot_duration: Duration,
    initial_delay: u64,
    sync_timeout: u64,
) -> Result<(), String> {
    verify_one_node_sync(
        network.clone(),
        beacon_config.clone(),
        slot_duration,
        initial_delay,
        sync_timeout,
    )
    .await?;
    println!("Completed one node sync");
    verify_two_nodes_sync(
        network.clone(),
        beacon_config.clone(),
        slot_duration,
        initial_delay,
        sync_timeout,
    )
    .await?;
    println!("Completed two node sync");
    verify_in_between_sync(
        network,
        beacon_config,
        slot_duration,
        initial_delay,
        sync_timeout,
    )
    .await?;
    println!("Completed in between sync");
    Ok(())
}
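
/// Returns `true` if any beacon node in the network reports that it is still syncing.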
pub async fn check_still_syncing<E: EthSpec>(network: &LocalNetwork<E>) -> Result<bool, String> {
    // Get the syncing status of each node.
    let mut status = Vec::new();
    for remote_node in network.remote_nodes()? {
        status.push(
            remote_node
                .get_node_syncing()
                .await
                .map(|body| body.data.is_syncing)
                .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?,
        );
    }
    Ok(status.iter().any(|is_syncing| *is_syncing))
}