Clean up obsolete TODOs (#1734)

Squashed commit of the following:

commit f99373cbaec9adb2bdbae3f7e903284327962083
Author: Age Manning <Age@AgeManning.com>
Date:   Mon Oct 5 18:44:09 2020 +1100

    Clean up obsolete TODOs
Paul Hauner 2020-10-05 21:08:14 +11:00
parent ee7c8a0b7e
commit da44821e39
11 changed files with 21 additions and 112 deletions

View File

@@ -54,8 +54,6 @@ impl<TSpec: EthSpec> DelegatingHandler<TSpec> {
}
}
// TODO: this can all be created with macros
/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug, Clone)]
@@ -115,7 +113,6 @@ pub type DelegateOutProto<TSpec> = EitherUpgrade<
>,
>;
// TODO: prob make this an enum
pub type DelegateOutInfo<TSpec> = EitherOutput<
<GossipHandler as ProtocolsHandler>::OutboundOpenInfo,
EitherOutput<
@@ -216,7 +213,6 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
// TODO: find how to clean up
match info {
// Gossipsub
EitherOutput::First(info) => match error {

View File

@@ -102,7 +102,7 @@ pub struct Behaviour<TSpec: EthSpec> {
/// The Eth2 RPC specified in the wire-0 protocol.
eth2_rpc: RPC<TSpec>,
/// Keep regular connection to peers and disconnect if absent.
// TODO: Using id for initial interop. This will be removed by mainnet.
// NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
/// Provides IP addresses and peer information.
identify: Identify,
/// The peer manager that keeps track of peer's reputation and status.
@@ -203,9 +203,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
self.enr_fork_id.fork_digest,
);
// TODO: Implement scoring
// let topic: Topic = gossip_topic.into();
// self.gossipsub.set_topic_params(t.hash(), TopicScoreParams::default());
self.subscribe(gossip_topic)
}
@@ -227,12 +224,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
GossipEncoding::default(),
self.enr_fork_id.fork_digest,
);
// TODO: Implement scoring
/*
let t: Topic = topic.clone().into();
self.gossipsub
.set_topic_params(t.hash(), TopicScoreParams::default());
*/
self.subscribe(topic)
}
@@ -620,7 +611,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
RPCRequest::MetaData(_) => {
// send the requested meta-data
self.send_meta_data_response((handler_id, id), peer_id);
// TODO: inform the peer manager?
}
RPCRequest::Goodbye(reason) => {
// queue for disconnection without a goodbye message

View File

@@ -129,7 +129,6 @@ pub fn create_enr_builder_from_config<T: EnrKey>(config: &NetworkConfig) -> EnrB
builder.udp(udp_port);
}
// we always give it our listening tcp port
// TODO: Add uPnP support to map udp and tcp ports
let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port);
builder.tcp(tcp_port).tcp(config.libp2p_port);
builder

View File

@@ -147,8 +147,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
///
/// If the peer doesn't exist, log a warning and insert defaults.
pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
// TODO: Remove duplicate code - This is duplicated in the update_peer_scores()
// function.
// NOTE: This is duplicated in the update_peer_scores() and could be improved.
// Variables to update the PeerDb if required.
let mut ban_peer = None;
@@ -179,7 +178,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
GoodbyeReason::BadScore,
));
}
// TODO: Update the peer manager to inform that the peer is disconnecting.
}
ScoreState::Healthy => {
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
@@ -399,10 +397,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
// Not supporting a protocol shouldn't be considered a malicious action, but
// it is an action that in some cases will make the peer unfit to continue
// communicating.
// TODO: To avoid punishing a peer repeatedly for not supporting a protocol, this
// information could be stored and used to prevent sending requests for the given
// protocol to this peer. Similarly, to avoid blacklisting a peer for a protocol
// forever, if stored this information should expire.
match protocol {
Protocol::Ping => PeerAction::Fatal,
Protocol::BlocksByRange => return,
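
For context on the TODO removed in the hunk above — remembering that a peer does not support a protocol so requests for it can be skipped, while letting that memory expire so the peer is not written off forever — a minimal, self-contained sketch of such an expiring record might look as follows. The type and method names are illustrative assumptions, not part of Lighthouse:

use std::collections::HashMap;
use std::hash::Hash;
use std::time::{Duration, Instant};

/// Illustrative only: remembers which protocols a peer failed to support so
/// requests for them can be avoided, and forgets that information after `ttl`
/// so the peer is not blacklisted for the protocol forever.
struct UnsupportedProtocols<Peer: Hash + Eq, Proto: Hash + Eq> {
    ttl: Duration,
    entries: HashMap<(Peer, Proto), Instant>,
}

impl<Peer: Hash + Eq, Proto: Hash + Eq> UnsupportedProtocols<Peer, Proto> {
    fn new(ttl: Duration) -> Self {
        Self { ttl, entries: HashMap::new() }
    }

    /// Record that `peer` reported not supporting `proto`.
    fn record(&mut self, peer: Peer, proto: Proto) {
        self.entries.insert((peer, proto), Instant::now());
    }

    /// True if requests for `proto` should currently be avoided for `peer`.
    fn is_unsupported(&mut self, peer: Peer, proto: Proto) -> bool {
        let ttl = self.ttl;
        // Drop expired records first, so the block is temporary rather than
        // permanent, as the removed TODO suggested.
        self.entries.retain(|_, recorded| recorded.elapsed() < ttl);
        self.entries.contains_key(&(peer, proto))
    }
}

A peer manager could call record() when a request fails with an unsupported-protocol error and consult is_unsupported() before issuing further requests for that protocol to the same peer.
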
@@ -436,7 +431,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
/// A ping request has been received.
// NOTE: The behaviour responds with a PONG automatically
// TODO: Update last seen
pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) {
if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
// received a ping
@@ -466,7 +460,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
}
/// A PONG has been returned from a peer.
// TODO: Update last seen
pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) {
if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
// received a pong
@@ -492,7 +485,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
}
/// Received a metadata response from a peer.
// TODO: Update last seen
pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
if let Some(known_meta_data) = &peer_info.meta_data {
@@ -588,7 +580,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
let connected_or_dialing = self.network_globals.connected_or_dialing_peers();
for (peer_id, min_ttl) in results {
// we attempt a connection if this peer is a subnet peer or if the max peer count
// is not yet filled (including dialling peers)
// is not yet filled (including dialing peers)
if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers)
&& !self
.network_globals
@@ -601,7 +593,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
.read()
.is_banned_or_disconnected(&peer_id)
{
// TODO: Update output
// This should be updated with the peer dialing. In fact created once the peer is
// dialed
if let Some(min_ttl) = min_ttl {
@@ -690,58 +681,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
// Update scores
info.score_update();
/* TODO: Implement logic about connection lifetimes
match info.connection_status {
Connected { .. } => {
// Connected peers gain reputation by sending useful messages
}
Disconnected { since } | Banned { since } => {
// For disconnected peers, lower their reputation by 1 for every hour they
// stay disconnected. This helps us slowly forget disconnected peers.
// In the same way, slowly allow banned peers back again.
let dc_hours = now
.checked_duration_since(since)
.unwrap_or_else(|| Duration::from_secs(0))
.as_secs()
/ 3600;
let last_dc_hours = self
._last_updated
.checked_duration_since(since)
.unwrap_or_else(|| Duration::from_secs(0))
.as_secs()
/ 3600;
if dc_hours > last_dc_hours {
// this should be 1 most of the time
let rep_dif = (dc_hours - last_dc_hours)
.try_into()
.unwrap_or(Rep::max_value());
info.reputation = if info.connection_status.is_banned() {
info.reputation.saturating_add(rep_dif)
} else {
info.reputation.saturating_sub(rep_dif)
};
}
}
Dialing { since } => {
// A peer shouldn't be dialing for more than 2 minutes
if since.elapsed().as_secs() > 120 {
warn!(self.log,"Peer has been dialing for too long"; "peer_id" => id.to_string());
// TODO: decide how to handle this
}
}
Unknown => {} //TODO: Handle this case
}
// Check if the peer gets banned or unbanned and if it should be disconnected
if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() {
// This peer gets banned. Check if we should request disconnection
ban_queue.push(id.clone());
} else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() {
// This peer gets unbanned
unban_queue.push(id.clone());
}
*/
// handle score transitions
if previous_state != info.score_state() {
match info.score_state() {
@@ -765,7 +704,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
GoodbyeReason::BadScore,
));
}
// TODO: Update peer manager to report that it's disconnecting.
}
ScoreState::Healthy => {
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
@@ -829,9 +767,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
///
/// NOTE: Discovery will only add a new query if one isn't already queued.
fn heartbeat(&mut self) {
// TODO: Provide a back-off time for discovery queries. I.e Queue many initially, then only
// perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats. This
// is achievable with a leaky bucket
let peer_count = self.network_globals.connected_or_dialing_peers();
if peer_count < self.target_peers {
// If we need more peers, queue a discovery lookup.
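
The TODO removed in the hunk above suggested rate-limiting discovery queries with a leaky bucket: queue many initially, then roughly one query every 6 heartbeats. Purely as an illustration of that idea, a minimal sketch could look like the following; only the 6-heartbeat interval comes from the removed comment, while the struct name and the burst size of 4 are assumptions:

/// Illustrative only: allows a burst of discovery queries up front, then
/// refills one query slot every HEARTBEATS_PER_REFILL heartbeats.
struct DiscoveryBackoff {
    /// Queries that may be issued right now.
    tokens: u32,
    /// Heartbeats elapsed since the last refill.
    since_refill: u32,
}

const INITIAL_BURST: u32 = 4;          // assumption
const HEARTBEATS_PER_REFILL: u32 = 6;  // "perhaps one every 6 heartbeats"

impl DiscoveryBackoff {
    fn new() -> Self {
        Self { tokens: INITIAL_BURST, since_refill: 0 }
    }

    /// Call once per heartbeat; returns true if a discovery query may be made.
    fn on_heartbeat(&mut self) -> bool {
        self.since_refill += 1;
        if self.since_refill >= HEARTBEATS_PER_REFILL {
            self.since_refill = 0;
            // Leak one slot back into the bucket, capped at the burst size.
            self.tokens = (self.tokens + 1).min(INITIAL_BURST);
        }
        if self.tokens > 0 {
            self.tokens -= 1;
            true
        } else {
            false
        }
    }
}

A heartbeat loop would then only queue a discovery lookup when on_heartbeat() returns true.
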

View File

@@ -130,7 +130,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
}
/// Returns a mutable reference to a peer's info if known.
/// TODO: make pub(super) to ensure that peer management is unified
pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo<TSpec>> {
self.peers.get_mut(peer_id)
}

View File

@@ -25,8 +25,6 @@ use std::{
use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant};
use types::EthSpec;
//TODO: Implement check_timeout() on the substream types
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 10;
@@ -163,8 +161,6 @@ struct OutboundInfo<TSpec: EthSpec> {
/// Info over the protocol this substream is handling.
proto: Protocol,
/// Number of chunks to be seen from the peer's response.
// TODO: removing the option could allow clossing the streams after the number of
// expected responses is met for all protocols.
remaining_chunks: Option<u64>,
/// `RequestId` as given by the application that sent the request.
req_id: RequestId,

View File

@@ -195,14 +195,9 @@ impl<T: BeaconChainTypes> AttestationService<T> {
slot: subscription.slot,
};
// determine if the validator is an aggregator. If so, we subscribe to the subnet and
// Determine if the validator is an aggregator. If so, we subscribe to the subnet and
// if successful add the validator to a mapping of known aggregators for that exact
// subnet.
// NOTE: There is a chance that a fork occurs between now and when the validator needs
// to aggregate attestations. If this happens, the signature will no longer be valid
// and it could be likely the validator no longer needs to aggregate. More
// sophisticated logic should be added using known future forks.
// TODO: Implement
if subscription.is_aggregator {
metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS);
@@ -286,8 +281,6 @@ impl<T: BeaconChainTypes> AttestationService<T> {
min_ttl,
})
} else {
// TODO: Send the time frame needed to have a peer connected, so that we can
// maintain peers for a least this duration.
// We may want to check the global PeerInfo to see estimated timeouts for each
// peer before they can be removed.
warn!(self.log,

View File

@@ -51,7 +51,7 @@ pub enum NetworkMessage<T: EthSpec> {
},
/// Respond to a peer's request with an error.
SendError {
// TODO: note that this is never used, we just say goodbye without nicely closing the
// NOTE: Currently this is never used, we just say goodbye without nicely closing the
// stream assigned to the request
peer_id: PeerId,
error: RPCResponseErrorCode,
@@ -163,7 +163,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
"Loading peers into the routing table"; "peers" => enrs_to_load.len()
);
for enr in enrs_to_load {
libp2p.swarm.add_enr(enr.clone()); //TODO change?
libp2p.swarm.add_enr(enr.clone());
}
// launch derived network services
@@ -349,7 +349,6 @@ fn spawn_service<T: BeaconChainTypes>(
// process any attestation service events
Some(attestation_service_message) = service.attestation_service.next() => {
match attestation_service_message {
// TODO: Implement
AttServiceMessage::Subscribe(subnet_id) => {
service.libp2p.swarm.subscribe_to_subnet(subnet_id);
}

View File

@@ -119,7 +119,6 @@ pub enum SyncMessage<T: EthSpec> {
}
/// The result of processing a multiple blocks (a chain segment).
// TODO: When correct batch error handling occurs, we will include an error type.
#[derive(Debug)]
pub enum BatchProcessResult {
/// The batch was completed successfully. It carries whether the sent batch contained blocks.
@@ -629,7 +628,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
self.update_sync_state();
}
// TODO: Group these functions into one.
// TODO: Group these functions into one for cleaner code.
/// Updates the syncing state of a peer to be synced.
fn synced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) {
if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
@@ -792,7 +791,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// This currently can be a host of errors. We permit this due to the partial
// ambiguity.
// TODO: Refine the error types and score the peer appropriately.
self.network.report_peer(
parent_request.last_submitted_peer,
PeerAction::MidToleranceError,

View File

@@ -613,9 +613,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => {
unreachable!("batch indicates inconsistent chain state while advancing chain")
}
BatchState::AwaitingProcessing(..) => {
// TODO: can we be sure the old attempts are wrong?
}
BatchState::AwaitingProcessing(..) => {}
BatchState::Processing(_) => {
assert_eq!(
id,
@@ -651,9 +649,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
/// These events occur when a peer has successfully responded with blocks, but the blocks we
/// have received are incorrect or invalid. This indicates the peer has not performed as
/// intended and can result in downvoting a peer.
// TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We
// need to add logic for partial batch downloads. Potentially, if another peer returns the same
// batch, we try a partial download.
fn handle_invalid_batch(
&mut self,
network: &mut SyncNetworkContext<T::EthSpec>,
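
The TODO removed in the hunk above noted that batches can be partially downloaded due to RPC size limits, and that the remainder could be re-requested (possibly from another peer). As a rough, self-contained illustration of that bookkeeping only — the struct and field names are hypothetical and unrelated to the real batch types:

/// Illustrative only: tracks how much of a requested batch has arrived so a
/// follow-up request could fetch just the missing remainder.
struct PartialBatch {
    start_slot: u64,
    expected: u64,
    received: u64,
}

impl PartialBatch {
    /// The (start_slot, count) still missing, if the batch is incomplete.
    fn remaining(&self) -> Option<(u64, u64)> {
        if self.received < self.expected {
            Some((self.start_slot + self.received, self.expected - self.received))
        } else {
            None
        }
    }
}
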

View File

@@ -220,7 +220,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
if let Some(removed_chain) = removed_chain {
debug!(self.log, "Chain removed after block response"; "sync_type" => ?sync_type, "chain_id" => chain_id);
removed_chain.status_peers(network);
// TODO: update & update_sync_state?
// update the state of the collection
self.chains.update(network);
// update the global state and inform the user
self.chains.update_sync_state(network);
}
}
Err(_) => {
@@ -319,7 +322,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
.call_all(|chain| chain.remove_peer(peer_id, network))
{
debug!(self.log, "Chain removed after removing peer"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
// TODO: anything else to do?
// update the state of the collection
self.chains.update(network);
// update the global state and inform the user
self.chains.update_sync_state(network);
}
}
@@ -343,7 +349,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
if let Some(removed_chain) = removed_chain {
debug!(self.log, "Chain removed on rpc error"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
removed_chain.status_peers(network);
// TODO: update & update_sync_state?
// update the state of the collection
self.chains.update(network);
// update the global state and inform the user
self.chains.update_sync_state(network);
}
}
Err(_) => {