#![cfg(test)]
use libp2p::gossipsub::GossipsubConfigBuilder;
use lighthouse_network::Enr;
use lighthouse_network::EnrExt;
use lighthouse_network::Multiaddr;
use lighthouse_network::Service as LibP2PService;
use lighthouse_network::{Libp2pEvent, NetworkConfig};
use slog::{debug, error, o, Drain};
use std::net::{TcpListener, UdpSocket};
use std::sync::Arc;
use std::sync::Weak;
use std::time::Duration;
use tokio::runtime::Runtime;
use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, Hash256, MinimalEthSpec};
type E = MinimalEthSpec;
use tempfile::Builder as TempBuilder;
/// Returns a dummy fork context
fn fork_context() -> ForkContext {
    let mut chain_spec = E::default_spec();
    // Set fork_epoch to `Some` to ensure that the `ForkContext` object
    // includes altair in the list of forks
    chain_spec.altair_fork_epoch = Some(types::Epoch::new(42));
    ForkContext::new::<E>(types::Slot::new(0), Hash256::zero(), &chain_spec)
}
/// A libp2p service bundled with the `exit_future::Signal` used to shut down its task executor
/// when the instance is dropped.
pub struct Libp2pInstance(LibP2PService<E>, exit_future::Signal);
impl std::ops::Deref for Libp2pInstance {
    type Target = LibP2PService<E>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl std::ops::DerefMut for Libp2pInstance {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build().fuse();

    if enabled {
        slog::Logger::root(drain.filter_level(level).fuse(), o!())
    } else {
        slog::Logger::root(drain.filter(|_| false).fuse(), o!())
    }
}
/// A bit of a hack to find an unused port.
///
/// Does not guarantee that the given port is unused after the function exits, just that it was
/// unused before the function started (i.e., it does not reserve a port).
pub fn unused_port(transport: &str) -> Result<u16, String> {
    let local_addr = match transport {
        "tcp" => {
            let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| {
                format!("Failed to create TCP listener to find unused port: {:?}", e)
            })?;
            listener.local_addr().map_err(|e| {
                format!(
                    "Failed to read TCP listener local_addr to find unused port: {:?}",
                    e
                )
            })?
        }
        "udp" => {
            let socket = UdpSocket::bind("127.0.0.1:0")
                .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?;
            socket.local_addr().map_err(|e| {
                format!(
                    "Failed to read UDP socket local_addr to find unused port: {:?}",
                    e
                )
            })?
        }
        _ => return Err("Invalid transport to find unused port".into()),
    };
    Ok(local_addr.port())
}
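// Illustrative usage (a sketch, not called from this module): grab a free TCP port via
// `unused_port` and feed it into a test config. Because the port is not reserved, another
// process could still claim it between this call and the moment the test actually binds it.
//
//     let port = unused_port("tcp").expect("should find an unused port");
//     let config = build_config(port, vec![]);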
pub fn build_config(port: u16, mut boot_nodes: Vec<Enr>) -> NetworkConfig {
    let mut config = NetworkConfig::default();
    let path = TempBuilder::new()
        .prefix(&format!("libp2p_test{}", port))
        .tempdir()
        .unwrap();

    config.libp2p_port = port; // tcp port
    config.discovery_port = port; // udp port
    config.enr_tcp_port = Some(port);
    config.enr_udp_port = Some(port);
    config.enr_address = Some("127.0.0.1".parse().unwrap());
    config.boot_nodes_enr.append(&mut boot_nodes);
    config.network_dir = path.into_path();
    // Reduce gossipsub heartbeat parameters
    config.gs_config = GossipsubConfigBuilder::from(config.gs_config)
        .heartbeat_initial_delay(Duration::from_millis(500))
        .heartbeat_interval(Duration::from_millis(500))
        .build()
        .unwrap();
    config
}
/// Builds a new libp2p instance on an unused port, using the given boot nodes and logger.
pub async fn build_libp2p_instance(
    rt: Weak<Runtime>,
    boot_nodes: Vec<Enr>,
    log: slog::Logger,
) -> Libp2pInstance {
    let port = unused_port("tcp").unwrap();
    let config = build_config(port, boot_nodes);

    // Launch the libp2p service.
    let (signal, exit) = exit_future::signal();
    let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
    let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx);
    let fork_context = Arc::new(fork_context());
    Libp2pInstance(
        LibP2PService::new(
            executor,
            &config,
            EnrForkId::default(),
            &log,
            fork_context,
            &ChainSpec::minimal(),
        )
        .await
        .expect("should build libp2p instance")
        .1,
        signal,
    )
}
#[allow(dead_code)]
pub fn get_enr(node: &LibP2PService<E>) -> Enr {
    node.swarm.behaviour().local_enr()
}
// Returns `n` libp2p peers in a fully connected topology.
#[allow(dead_code)]
pub async fn build_full_mesh(
    rt: Weak<Runtime>,
    log: slog::Logger,
    n: usize,
) -> Vec<Libp2pInstance> {
    let mut nodes = Vec::with_capacity(n);
    for _ in 0..n {
        nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await);
    }
    let multiaddrs: Vec<Multiaddr> = nodes
        .iter()
        .map(|x| get_enr(x).multiaddr()[1].clone())
        .collect();

    for (i, node) in nodes.iter_mut().enumerate().take(n) {
        for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) {
            if i != j {
                match libp2p::Swarm::dial(&mut node.swarm, multiaddr.clone()) {
                    Ok(()) => debug!(log, "Connected"),
                    Err(_) => error!(log, "Failed to connect"),
                };
            }
        }
    }
    nodes
}
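// Illustrative usage of `build_full_mesh` (a sketch, assuming a test that owns an `Arc<Runtime>`
// named `rt`): build three peers that dial each other so every pair ends up connected.
//
//     let rt = Arc::new(Runtime::new().unwrap());
//     let log = build_log(slog::Level::Debug, false);
//     let nodes = rt.block_on(build_full_mesh(Arc::downgrade(&rt), log.clone(), 3));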
// Constructs a pair of nodes with separate loggers. The sender dials the receiver.
// This returns a (sender, receiver) pair.
#[allow(dead_code)]
pub async fn build_node_pair(
    rt: Weak<Runtime>,
    log: &slog::Logger,
) -> (Libp2pInstance, Libp2pInstance) {
    let sender_log = log.new(o!("who" => "sender"));
    let receiver_log = log.new(o!("who" => "receiver"));

    let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await;
    let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await;

    let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone();

    // Let the two nodes set up their listeners.
    let sender_fut = async {
        loop {
            if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await {
                return;
            }
        }
    };
    let receiver_fut = async {
        loop {
            if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await {
                return;
            }
        }
    };

    let joined = futures::future::join(sender_fut, receiver_fut);

    // Wait until both nodes are listening, or give up after a short timeout.
    tokio::select! {
        _ = tokio::time::sleep(Duration::from_millis(500)) => {}
        _ = joined => {}
    }

    match libp2p::Swarm::dial(&mut sender.swarm, receiver_multiaddr.clone()) {
        Ok(()) => {
            debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr))
        }
        Err(_) => error!(log, "Dialing failed"),
    };
    (sender, receiver)
}
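// Illustrative usage of `build_node_pair` (a sketch from inside an async test, with `rt` an
// `Arc<Runtime>` and `log` from `build_log`): both handles deref to `LibP2PService<E>`, so
// events can be polled directly on either side.
//
//     let (mut sender, mut receiver) = build_node_pair(Arc::downgrade(&rt), &log).await;
//     let event = receiver.next_event().await;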
// Returns `n` libp2p peers in a linear (chain) topology, where node `i` dials node `i + 1`.
#[allow(dead_code)]
pub async fn build_linear(rt: Weak<Runtime>, log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
    let mut nodes = Vec::with_capacity(n);
    for _ in 0..n {
        nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await);
    }

    let multiaddrs: Vec<Multiaddr> = nodes
        .iter()
        .map(|x| get_enr(x).multiaddr()[1].clone())
        .collect();
    for i in 0..n - 1 {
        match libp2p::Swarm::dial(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) {
            Ok(()) => debug!(log, "Connected"),
            Err(_) => error!(log, "Failed to connect"),
        };
    }
    nodes
}
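// Illustrative usage of `build_linear` (a sketch): a chain of four peers in which each node is
// connected only to its neighbours, which is handy for exercising multi-hop message propagation.
//
//     let nodes = rt.block_on(build_linear(Arc::downgrade(&rt), log, 4));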