2019-11-27 01:47:46 +00:00
|
|
|
#![cfg(test)]
|
2024-01-31 17:32:31 +00:00
|
|
|
use lighthouse_network::gossipsub;
|
2022-09-29 01:50:11 +00:00
|
|
|
use lighthouse_network::service::Network as LibP2PService;
|
Rename eth2_libp2p to lighthouse_network (#2702)
## Description
The `eth2_libp2p` crate was originally named and designed to incorporate a simple libp2p integration into lighthouse. Since its origins the crates purpose has expanded dramatically. It now houses a lot more sophistication that is specific to lighthouse and no longer just a libp2p integration.
As of this writing it currently houses the following high-level lighthouse-specific logic:
- Lighthouse's implementation of the eth2 RPC protocol and specific encodings/decodings
- Integration and handling of ENRs with respect to libp2p and eth2
- Lighthouse's discovery logic, its integration with discv5 and logic about searching and handling peers.
- Lighthouse's peer manager - This is a large module handling various aspects of Lighthouse's network, such as peer scoring, handling pings and metadata, connection maintenance and recording, etc.
- Lighthouse's peer database - This is a collection of information stored for each individual peer which is specific to lighthouse. We store connection state, sync state, last seen ips and scores etc. The data stored for each peer is designed for various elements of the lighthouse code base such as syncing and the http api.
- Gossipsub scoring - This stores a collection of gossipsub 1.1 scoring mechanisms that are continuously analysed and updated based on the ethereum 2 networks and how Lighthouse performs on these networks.
- Lighthouse specific types for managing gossipsub topics, sync status and ENR fields
- Lighthouse's network HTTP API metrics - A collection of metrics for lighthouse network monitoring
- Lighthouse's custom configuration of all networking protocols, RPC, gossipsub, discovery, identify and libp2p.
Therefore it makes sense to rename the crate to be more akin to its current purposes, simply that it manages the majority of Lighthouse's network stack. This PR renames this crate to `lighthouse_network`
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2021-10-19 00:30:39 +00:00
|
|
|
use lighthouse_network::Enr;
|
|
|
|
use lighthouse_network::EnrExt;
|
|
|
|
use lighthouse_network::Multiaddr;
|
2022-09-29 01:50:11 +00:00
|
|
|
use lighthouse_network::{NetworkConfig, NetworkEvent};
|
2019-11-27 01:47:46 +00:00
|
|
|
use slog::{debug, error, o, Drain};
|
2021-08-04 01:44:57 +00:00
|
|
|
use std::sync::Arc;
|
2020-11-28 05:30:57 +00:00
|
|
|
use std::sync::Weak;
|
2019-11-27 01:47:46 +00:00
|
|
|
use std::time::Duration;
|
2020-11-28 05:30:57 +00:00
|
|
|
use tokio::runtime::Runtime;
|
2022-04-04 00:26:15 +00:00
|
|
|
use types::{
|
|
|
|
ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot,
|
|
|
|
};
|
Initial work towards v0.2.0 (#924)
* Remove ping protocol
* Initial renaming of network services
* Correct rebasing relative to latest master
* Start updating types
* Adds HashMapDelay struct to utils
* Initial network restructure
* Network restructure. Adds new types for v0.2.0
* Removes build artefacts
* Shift validation to beacon chain
* Temporarily remove gossip validation
This is to be updated to match current optimisation efforts.
* Adds AggregateAndProof
* Begin rebuilding pubsub encoding/decoding
* Signature hacking
* Shift gossipsup decoding into eth2_libp2p
* Existing EF tests passing with fake_crypto
* Shifts block encoding/decoding into RPC
* Delete outdated API spec
* All release tests passing bar genesis state parsing
* Update and test YamlConfig
* Update to spec v0.10 compatible BLS
* Updates to BLS EF tests
* Add EF test for AggregateVerify
And delete unused hash2curve tests for uncompressed points
* Update EF tests to v0.10.1
* Use optional block root correctly in block proc
* Use genesis fork in deposit domain. All tests pass
* Fast aggregate verify test
* Update REST API docs
* Fix unused import
* Bump spec tags to v0.10.1
* Add `seconds_per_eth1_block` to chainspec
* Update to timestamp based eth1 voting scheme
* Return None from `get_votes_to_consider` if block cache is empty
* Handle overflows in `is_candidate_block`
* Revert to failing tests
* Fix eth1 data sets test
* Choose default vote according to spec
* Fix collect_valid_votes tests
* Fix `get_votes_to_consider` to choose all eligible blocks
* Uncomment winning_vote tests
* Add comments; remove unused code
* Reduce seconds_per_eth1_block for simulation
* Addressed review comments
* Add test for default vote case
* Fix logs
* Remove unused functions
* Meter default eth1 votes
* Fix comments
* Progress on attestation service
* Address review comments; remove unused dependency
* Initial work on removing libp2p lock
* Add LRU caches to store (rollup)
* Update attestation validation for DB changes (WIP)
* Initial version of should_forward_block
* Scaffold
* Progress on attestation validation
Also, consolidate prod+testing slot clocks so that they share much
of the same implementation and can both handle sub-slot time changes.
* Removes lock from libp2p service
* Completed network lock removal
* Finish(?) attestation processing
* Correct network termination future
* Add slot check to block check
* Correct fmt issues
* Remove Drop implementation for network service
* Add first attempt at attestation proc. re-write
* Add version 2 of attestation processing
* Minor fixes
* Add validator pubkey cache
* Make get_indexed_attestation take a committee
* Link signature processing into new attn verification
* First working version
* Ensure pubkey cache is updated
* Add more metrics, slight optimizations
* Clone committee cache during attestation processing
* Update shuffling cache during block processing
* Remove old commented-out code
* Fix shuffling cache insert bug
* Used indexed attestation in fork choice
* Restructure attn processing, add metrics
* Add more detailed metrics
* Tidy, fix failing tests
* Fix failing tests, tidy
* Address reviewers suggestions
* Disable/delete two outdated tests
* Modification of validator for subscriptions
* Add slot signing to validator client
* Further progress on validation subscription
* Adds necessary validator subscription functionality
* Add new Pubkeys struct to signature_sets
* Refactor with functional approach
* Update beacon chain
* Clean up validator <-> beacon node http types
* Add aggregator status to ValidatorDuty
* Impl Clone for manual slot clock
* Fix minor errors
* Further progress validator client subscription
* Initial subscription and aggregation handling
* Remove decompressed member from pubkey bytes
* Progress to modifying val client for attestation aggregation
* First draft of validator client upgrade for aggregate attestations
* Add hashmap for indices lookup
* Add state cache, remove store cache
* Only build the head committee cache
* Removes lock on a network channel
* Partially implement beacon node subscription http api
* Correct compilation issues
* Change `get_attesting_indices` to use Vec
* Fix failing test
* Partial implementation of timer
* Adds timer, removes exit_future, http api to op pool
* Partial multiple aggregate attestation handling
* Permits bulk messages across gossipsub network channel
* Correct compile issues
* Improve gossipsub messaging and correct rest api helpers
* Added global gossipsub subscriptions
* Update validator subscriptions data structs
* Tidy
* Re-structure validator subscriptions
* Initial handling of subscriptions
* Re-structure network service
* Add pubkey cache persistence file
* Add more comments
* Integrate persistence file into builder
* Add pubkey cache tests
* Add HashSetDelay and introduce into attestation service
* Handles validator subscriptions
* Add data_dir to beacon chain builder
* Remove Option in pubkey cache persistence file
* Ensure consistency between datadir/data_dir
* Fix failing network test
* Peer subnet discovery gets queued for future subscriptions
* Reorganise attestation service functions
* Initial wiring of attestation service
* First draft of attestation service timing logic
* Correct minor typos
* Tidy
* Fix todos
* Improve tests
* Add PeerInfo to connected peers mapping
* Fix compile error
* Fix compile error from merge
* Split up block processing metrics
* Tidy
* Refactor get_pubkey_from_state
* Remove commented-out code
* Rename state_cache -> checkpoint_cache
* Rename Checkpoint -> Snapshot
* Tidy, add comments
* Tidy up find_head function
* Change some checkpoint -> snapshot
* Add tests
* Expose max_len
* Remove dead code
* Tidy
* Fix bug
* Add sync-speed metric
* Add first attempt at VerifiableBlock
* Start integrating into beacon chain
* Integrate VerifiableBlock
* Rename VerifableBlock -> PartialBlockVerification
* Add start of typed methods
* Add progress
* Add further progress
* Rename structs
* Add full block verification to block_processing.rs
* Further beacon chain integration
* Update checks for gossip
* Add todo
* Start adding segment verification
* Add passing chain segment test
* Initial integration with batch sync
* Minor changes
* Tidy, add more error checking
* Start adding chain_segment tests
* Finish invalid signature tests
* Include single and gossip verified blocks in tests
* Add gossip verification tests
* Start adding docs
* Finish adding comments to block_processing.rs
* Rename block_processing.rs -> block_verification
* Start removing old block processing code
* Fixes beacon_chain compilation
* Fix project-wide compile errors
* Remove old code
* Correct code to pass all tests
* Fix bug with beacon proposer index
* Fix shim for BlockProcessingError
* Only process one epoch at a time
* Fix loop in chain segment processing
* Correct tests from master merge
* Add caching for state.eth1_data_votes
* Add BeaconChain::validator_pubkey
* Revert "Add caching for state.eth1_data_votes"
This reverts commit cd73dcd6434fb8d8e6bf30c5356355598ea7b78e.
Co-authored-by: Grant Wuerker <gwuerker@gmail.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Co-authored-by: Michael Sproul <micsproul@gmail.com>
Co-authored-by: pawan <pawandhananjay@gmail.com>
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2020-03-17 06:24:44 +00:00
|
|
|
|
|
|
|
// Use the minimal spec so tests run quickly.
type E = MinimalEthSpec;
|
2022-03-02 22:07:17 +00:00
|
|
|
// Request-id type parameter used when instantiating the libp2p service in tests.
type ReqId = usize;
|
|
|
|
|
2021-01-06 06:36:11 +00:00
|
|
|
use tempfile::Builder as TempBuilder;
|
2019-11-27 01:47:46 +00:00
|
|
|
|
2021-08-04 01:44:57 +00:00
|
|
|
/// Returns a dummy fork context
|
2022-04-04 00:26:15 +00:00
|
|
|
pub fn fork_context(fork_name: ForkName) -> ForkContext {
|
2021-09-17 01:11:16 +00:00
|
|
|
let mut chain_spec = E::default_spec();
|
2022-04-04 00:26:15 +00:00
|
|
|
let altair_fork_epoch = Epoch::new(1);
|
|
|
|
let merge_fork_epoch = Epoch::new(2);
|
2022-10-26 19:15:26 +00:00
|
|
|
let capella_fork_epoch = Epoch::new(3);
|
2023-03-26 15:49:16 +00:00
|
|
|
let deneb_fork_epoch = Epoch::new(4);
|
2022-04-04 00:26:15 +00:00
|
|
|
|
|
|
|
chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
|
|
|
|
chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch);
|
2022-10-26 19:15:26 +00:00
|
|
|
chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
|
2023-03-26 15:49:16 +00:00
|
|
|
chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);
|
2022-04-04 00:26:15 +00:00
|
|
|
|
|
|
|
let current_slot = match fork_name {
|
|
|
|
ForkName::Base => Slot::new(0),
|
|
|
|
ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()),
|
|
|
|
ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()),
|
2022-10-26 19:15:26 +00:00
|
|
|
ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()),
|
2023-03-26 15:49:16 +00:00
|
|
|
ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()),
|
2022-04-04 00:26:15 +00:00
|
|
|
};
|
|
|
|
ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
|
2021-08-04 01:44:57 +00:00
|
|
|
}
|
|
|
|
|
2024-02-05 17:54:11 +00:00
|
|
|
/// A libp2p service bundled with the shutdown sender that keeps its
/// task executor's exit channel open for the duration of a test.
pub struct Libp2pInstance(
    LibP2PService<ReqId, E>,
    #[allow(dead_code)]
    // This field is kept alive for lifetime purposes and may not be used directly, hence the `#[allow(dead_code)]` attribute.
    async_channel::Sender<()>,
);
|
2020-06-04 11:48:05 +00:00
|
|
|
|
|
|
|
// Allow a `Libp2pInstance` to be used directly as the wrapped libp2p service.
impl std::ops::Deref for Libp2pInstance {
    type Target = LibP2PService<ReqId, E>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
|
|
|
|
|
|
|
// Mutable access to the wrapped libp2p service (e.g. for `next_event`/dialing).
impl std::ops::DerefMut for Libp2pInstance {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
|
|
|
|
|
2021-12-20 23:45:21 +00:00
|
|
|
#[allow(unused)]
|
2019-11-27 01:47:46 +00:00
|
|
|
pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
|
|
|
|
let decorator = slog_term::TermDecorator::new().build();
|
|
|
|
let drain = slog_term::FullFormat::new(decorator).build().fuse();
|
|
|
|
let drain = slog_async::Async::new(drain).build().fuse();
|
|
|
|
|
|
|
|
if enabled {
|
|
|
|
slog::Logger::root(drain.filter_level(level).fuse(), o!())
|
|
|
|
} else {
|
|
|
|
slog::Logger::root(drain.filter(|_| false).fuse(), o!())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-15 03:07:24 +00:00
|
|
|
pub fn build_config(mut boot_nodes: Vec<Enr>) -> NetworkConfig {
|
2019-11-27 01:47:46 +00:00
|
|
|
let mut config = NetworkConfig::default();
|
2023-09-15 03:07:24 +00:00
|
|
|
|
|
|
|
// Find unused ports by using the 0 port.
|
|
|
|
let port = 0;
|
|
|
|
|
|
|
|
let random_path: u16 = rand::random();
|
2021-01-06 06:36:11 +00:00
|
|
|
let path = TempBuilder::new()
|
2023-09-15 03:07:24 +00:00
|
|
|
.prefix(&format!("libp2p_test_{}", random_path))
|
2021-01-06 06:36:11 +00:00
|
|
|
.tempdir()
|
|
|
|
.unwrap();
|
2020-03-02 02:35:20 +00:00
|
|
|
|
2023-09-15 03:07:24 +00:00
|
|
|
config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port, port);
|
2023-03-14 01:13:34 +00:00
|
|
|
config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None);
|
2020-08-17 02:13:26 +00:00
|
|
|
config.boot_nodes_enr.append(&mut boot_nodes);
|
2020-03-02 02:35:20 +00:00
|
|
|
config.network_dir = path.into_path();
|
2019-11-27 01:47:46 +00:00
|
|
|
// Reduce gossipsub heartbeat parameters
|
2023-08-02 00:59:34 +00:00
|
|
|
config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config)
|
2020-08-30 13:06:50 +00:00
|
|
|
.heartbeat_initial_delay(Duration::from_millis(500))
|
|
|
|
.heartbeat_interval(Duration::from_millis(500))
|
|
|
|
.build()
|
|
|
|
.unwrap();
|
2019-11-27 01:47:46 +00:00
|
|
|
config
|
|
|
|
}
|
|
|
|
|
2020-11-28 05:30:57 +00:00
|
|
|
pub async fn build_libp2p_instance(
|
|
|
|
rt: Weak<Runtime>,
|
|
|
|
boot_nodes: Vec<Enr>,
|
|
|
|
log: slog::Logger,
|
2022-04-04 00:26:15 +00:00
|
|
|
fork_name: ForkName,
|
2023-08-03 01:51:47 +00:00
|
|
|
spec: &ChainSpec,
|
2020-11-28 05:30:57 +00:00
|
|
|
) -> Libp2pInstance {
|
2023-09-15 03:07:24 +00:00
|
|
|
let config = build_config(boot_nodes);
|
2019-11-27 01:47:46 +00:00
|
|
|
// launch libp2p service
|
2020-06-04 11:48:05 +00:00
|
|
|
|
2024-02-27 22:12:44 +00:00
|
|
|
let (signal, exit) = async_channel::bounded(1);
|
2020-08-19 05:51:14 +00:00
|
|
|
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
2020-11-28 05:30:57 +00:00
|
|
|
let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx);
|
2021-12-22 06:17:14 +00:00
|
|
|
let libp2p_context = lighthouse_network::Context {
|
|
|
|
config: &config,
|
|
|
|
enr_fork_id: EnrForkId::default(),
|
2022-04-04 00:26:15 +00:00
|
|
|
fork_context: Arc::new(fork_context(fork_name)),
|
2023-08-03 01:51:47 +00:00
|
|
|
chain_spec: spec,
|
2023-12-07 09:39:59 +00:00
|
|
|
libp2p_registry: None,
|
2021-12-22 06:17:14 +00:00
|
|
|
};
|
2020-06-04 11:48:05 +00:00
|
|
|
Libp2pInstance(
|
2021-12-22 06:17:14 +00:00
|
|
|
LibP2PService::new(executor, libp2p_context, &log)
|
|
|
|
.await
|
|
|
|
.expect("should build libp2p instance")
|
2022-09-29 01:50:11 +00:00
|
|
|
.0,
|
2020-06-04 11:48:05 +00:00
|
|
|
signal,
|
|
|
|
)
|
2019-11-27 01:47:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[allow(dead_code)]
/// Returns the local ENR of the given libp2p service.
pub fn get_enr(node: &LibP2PService<ReqId, E>) -> Enr {
    node.local_enr()
}
|
|
|
|
|
2023-09-15 03:07:24 +00:00
|
|
|
// Protocol for the node pair connection.
pub enum Protocol {
    // Connect the pair over TCP.
    Tcp,
    // Connect the pair over QUIC.
    Quic,
}
|
|
|
|
|
2020-05-17 11:16:48 +00:00
|
|
|
// Constructs a pair of nodes with separate loggers. The sender dials the receiver.
|
2019-11-27 01:47:46 +00:00
|
|
|
// This returns a (sender, receiver) pair.
|
|
|
|
#[allow(dead_code)]
|
2020-11-28 05:30:57 +00:00
|
|
|
pub async fn build_node_pair(
|
|
|
|
rt: Weak<Runtime>,
|
|
|
|
log: &slog::Logger,
|
2022-04-04 00:26:15 +00:00
|
|
|
fork_name: ForkName,
|
2023-08-03 01:51:47 +00:00
|
|
|
spec: &ChainSpec,
|
2023-09-15 03:07:24 +00:00
|
|
|
protocol: Protocol,
|
2020-11-28 05:30:57 +00:00
|
|
|
) -> (Libp2pInstance, Libp2pInstance) {
|
2019-11-27 01:47:46 +00:00
|
|
|
let sender_log = log.new(o!("who" => "sender"));
|
|
|
|
let receiver_log = log.new(o!("who" => "receiver"));
|
|
|
|
|
2023-08-03 01:51:47 +00:00
|
|
|
let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await;
|
|
|
|
let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await;
|
2019-11-27 01:47:46 +00:00
|
|
|
|
2020-05-17 11:16:48 +00:00
|
|
|
// let the two nodes set up listeners
|
|
|
|
let sender_fut = async {
|
|
|
|
loop {
|
2023-09-11 06:14:56 +00:00
|
|
|
if let NetworkEvent::NewListenAddr(addr) = sender.next_event().await {
|
2023-09-15 03:07:24 +00:00
|
|
|
// Only end once we've listened on the protocol we care about
|
|
|
|
match protocol {
|
|
|
|
Protocol::Tcp => {
|
|
|
|
if addr.iter().any(|multiaddr_proto| {
|
|
|
|
matches!(multiaddr_proto, libp2p::multiaddr::Protocol::Tcp(_))
|
|
|
|
}) {
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Protocol::Quic => {
|
|
|
|
if addr.iter().any(|multiaddr_proto| {
|
|
|
|
matches!(multiaddr_proto, libp2p::multiaddr::Protocol::QuicV1)
|
|
|
|
}) {
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-17 11:16:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
let receiver_fut = async {
|
|
|
|
loop {
|
2023-09-11 06:14:56 +00:00
|
|
|
if let NetworkEvent::NewListenAddr(addr) = receiver.next_event().await {
|
2023-09-15 03:07:24 +00:00
|
|
|
match protocol {
|
|
|
|
Protocol::Tcp => {
|
|
|
|
if addr.iter().any(|multiaddr_proto| {
|
|
|
|
matches!(multiaddr_proto, libp2p::multiaddr::Protocol::Tcp(_))
|
|
|
|
}) {
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Protocol::Quic => {
|
|
|
|
if addr.iter().any(|multiaddr_proto| {
|
|
|
|
matches!(multiaddr_proto, libp2p::multiaddr::Protocol::QuicV1)
|
|
|
|
}) {
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-17 11:16:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let joined = futures::future::join(sender_fut, receiver_fut);
|
|
|
|
|
2023-09-11 06:14:56 +00:00
|
|
|
let receiver_multiaddr = joined.await.1;
|
2020-05-17 11:16:48 +00:00
|
|
|
|
2022-09-29 01:50:11 +00:00
|
|
|
match sender.testing_dial(receiver_multiaddr.clone()) {
|
2020-05-17 11:16:48 +00:00
|
|
|
Ok(()) => {
|
|
|
|
debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr))
|
|
|
|
}
|
2019-11-27 01:47:46 +00:00
|
|
|
Err(_) => error!(log, "Dialing failed"),
|
|
|
|
};
|
|
|
|
(sender, receiver)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns `n` peers in a linear topology
|
|
|
|
#[allow(dead_code)]
|
2022-04-04 00:26:15 +00:00
|
|
|
pub async fn build_linear(
|
|
|
|
rt: Weak<Runtime>,
|
|
|
|
log: slog::Logger,
|
|
|
|
n: usize,
|
|
|
|
fork_name: ForkName,
|
2023-08-03 01:51:47 +00:00
|
|
|
spec: &ChainSpec,
|
2022-04-04 00:26:15 +00:00
|
|
|
) -> Vec<Libp2pInstance> {
|
2020-08-17 02:13:26 +00:00
|
|
|
let mut nodes = Vec::with_capacity(n);
|
|
|
|
for _ in 0..n {
|
2023-08-03 01:51:47 +00:00
|
|
|
nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec).await);
|
2020-08-17 02:13:26 +00:00
|
|
|
}
|
|
|
|
|
2019-11-27 01:47:46 +00:00
|
|
|
let multiaddrs: Vec<Multiaddr> = nodes
|
|
|
|
.iter()
|
2021-07-30 01:11:47 +00:00
|
|
|
.map(|x| get_enr(x).multiaddr()[1].clone())
|
2019-11-27 01:47:46 +00:00
|
|
|
.collect();
|
|
|
|
for i in 0..n - 1 {
|
2022-09-29 01:50:11 +00:00
|
|
|
match nodes[i].testing_dial(multiaddrs[i + 1].clone()) {
|
2019-11-27 01:47:46 +00:00
|
|
|
Ok(()) => debug!(log, "Connected"),
|
|
|
|
Err(_) => error!(log, "Failed to connect"),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
nodes
|
|
|
|
}
|