Updates syncing, corrects CLI variables

This commit is contained in:
Age Manning 2019-09-03 00:34:41 +10:00
parent c1614b110b
commit cd7b6da88e
No known key found for this signature in database
GPG Key ID: 05EED64B79E06A93
6 changed files with 374 additions and 300 deletions

View File

@ -442,6 +442,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
None
}
/// Returns the block canonical root of the current canonical chain at a given slot.
///
/// Returns None if a block doesn't exist at the slot.
pub fn root_at_slot(&self, target_slot: Slot) -> Option<Hash256> {
self.rev_iter_block_roots()
.find(|(_root, slot)| *slot == target_slot)
.map(|(root, _slot)| root)
}
/// Reads the slot clock (see `self.read_slot_clock()`) and returns the number of slots since
/// genesis.
pub fn slots_since_genesis(&self) -> Option<SlotHeight> {
@ -1006,7 +1015,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
};
// Load the parent block's state from the database, returning an error if it is not found.
// It is an error because if we know the parent block we should also know the parent state.
let parent_state_root = parent_block.state_root;
let parent_state = self
.store
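The new `root_at_slot` helper above simply walks `rev_iter_block_roots()` until it reaches the requested slot. A minimal usage sketch, not part of the diff, with `chain` (an `Arc<BeaconChain<T>>`) and `block` assumed to already be in scope:

// Hypothetical caller: is this block already on our canonical chain?
let on_our_chain = chain
    .root_at_slot(block.slot)
    .map_or(false, |root| root == block.canonical_root());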

View File

@ -341,13 +341,9 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) {
}
Err(e) => {
warn!(
log,
"Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e)
);
}
}
}

View File

@ -82,17 +82,10 @@ impl Service {
// attempt to connect to user-input libp2p nodes
for multiaddr in config.libp2p_nodes {
match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
Err(err) => debug!(
log,
"Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err)
),
};
}
@ -129,7 +122,6 @@ impl Service {
let mut subscribed_topics = vec![];
for topic in topics {
if swarm.subscribe(topic.clone()) {
trace!(log, "Subscribed to topic"; "topic" => format!("{}", topic));
subscribed_topics.push(topic);
} else {
@ -137,15 +129,6 @@ impl Service {
}
}
info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::<Vec<String>>()));
Ok(Service {
local_peer_id,

View File

@ -80,6 +80,9 @@ const PARENT_FAIL_TOLERANCE: usize = 3;
/// canonical chain to its head once the peer connects. A chain should not appear where its depth
/// is further back than the most recent head slot.
const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2;
/// The number of empty batches we tolerate before dropping the peer. This prevents endless
/// requests to peers who never return blocks.
const EMPTY_BATCH_TOLERANCE: usize = 100;
#[derive(PartialEq)]
/// The current state of a block or batches lookup.
@ -95,6 +98,19 @@ enum BlockRequestsState {
Failed,
}
/// The current syncing direction of a batch request.
enum SyncDirection {
/// The batch has just been initialised and we need to check to see if a backward sync is
/// required on first batch response.
Initial,
/// We are syncing forwards, the next batch should contain higher slot numbers than its
/// predecessor.
Forwards,
/// We are syncing backwards and looking for a common ancestor chain before we can start
/// processing the downloaded blocks.
Backwards,
}
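The variants above describe a small state machine. The sketch below is illustrative only; `have_common_ancestor` stands in for the `canonical_root()`/`root_at_slot()` comparison that `beacon_blocks_response` actually performs:

// Illustrative transition function, not part of the commit.
fn next_direction(current: SyncDirection, have_common_ancestor: bool) -> SyncDirection {
    match current {
        // The first response decides: known chain means sync forwards, otherwise walk back.
        SyncDirection::Initial if have_common_ancestor => SyncDirection::Forwards,
        SyncDirection::Initial => SyncDirection::Backwards,
        // A backwards search flips to forwards once a common ancestor is found.
        SyncDirection::Backwards if have_common_ancestor => SyncDirection::Forwards,
        other => other,
    }
}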
/// `BlockRequests` keeps track of the long-range (batch) sync process per peer.
struct BlockRequests<T: EthSpec> {
/// The peer's head slot and the target of this batch download.
@ -104,10 +120,13 @@ struct BlockRequests<T: EthSpec> {
target_head_root: Hash256,
/// The blocks that we have currently downloaded from the peer that are yet to be processed.
downloaded_blocks: Vec<BeaconBlock<T>>,
/// The number of empty batches we have consecutively received. If a peer returns more than
/// `EMPTY_BATCH_TOLERANCE` empty batches, they are dropped.
consecutive_empty_batches: usize,
/// The current state of this batch request.
state: BlockRequestsState,
/// Specifies the current direction of this batch request.
sync_direction: SyncDirection,
/// The current `start_slot` of the batched block request.
current_start_slot: Slot,
}
@ -129,10 +148,13 @@ struct ParentRequests<T: EthSpec> {
impl<T: EthSpec> BlockRequests<T> {
/// Gets the next start slot for a batch and transitions the state to a Queued state.
fn update_start_slot(&mut self) {
match self.sync_direction {
SyncDirection::Initial | SyncDirection::Forwards => {
self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST);
}
SyncDirection::Backwards => {
self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST);
}
}
self.state = BlockRequestsState::Queued;
}
@ -175,6 +197,8 @@ pub(crate) enum ImportManagerOutcome {
/// controls the logic behind both the long-range (batch) sync and the on-going potential parent
/// look-up of blocks.
pub struct ImportManager<T: BeaconChainTypes> {
/// List of events to be processed externally.
event_queue: SmallVec<[ImportManagerOutcome; 20]>,
/// A weak reference to the underlying beacon chain.
chain: Weak<BeaconChain<T>>,
/// The current state of the import manager.
@ -200,6 +224,7 @@ impl<T: BeaconChainTypes> ImportManager<T> {
/// dropped during the syncing process. The syncing handles this termination gracefully.
pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: &slog::Logger) -> Self {
ImportManager {
event_queue: SmallVec::new(),
chain: Arc::downgrade(&beacon_chain),
state: ManagerState::Regular,
import_queue: HashMap::new(),
@ -253,7 +278,7 @@ impl<T: BeaconChainTypes> ImportManager<T> {
// Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE`
// treat them as a fully synced peer. If not, ignore them in the sync process
if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE {
self.add_full_peer(peer_id.clone());
} else {
debug!(
self.log,
@ -275,9 +300,10 @@ impl<T: BeaconChainTypes> ImportManager<T> {
let block_requests = BlockRequests {
target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called
target_head_root: remote.head_root,
consecutive_empty_batches: 0,
downloaded_blocks: Vec::new(),
state: BlockRequestsState::Queued,
sync_direction: SyncDirection::Initial,
current_start_slot: chain.best_slot(),
};
self.import_queue.insert(peer_id, block_requests);
@ -291,6 +317,16 @@ impl<T: BeaconChainTypes> ImportManager<T> {
request_id: RequestId,
mut blocks: Vec<BeaconBlock<T::EthSpec>>,
) {
// ensure the underlying chain still exists
let chain = match self.chain.upgrade() {
Some(chain) => chain,
None => {
debug!(self.log, "Chain dropped. Sync terminating");
self.event_queue.clear();
return;
}
};
// find the request associated with this response
let block_requests = match self
.import_queue
@ -315,10 +351,19 @@ impl<T: BeaconChainTypes> ImportManager<T> {
if blocks.is_empty() {
debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id);
block_requests.consecutive_empty_batches += 1;
if block_requests.consecutive_empty_batches >= EMPTY_BATCH_TOLERANCE {
warn!(self.log, "Peer returned too many empty block batches";
"peer" => format!("{:?}", peer_id));
block_requests.state = BlockRequestsState::Failed;
} else {
block_requests.update_start_slot();
}
return;
}
block_requests.consecutive_empty_batches = 0;
// verify the range of received blocks
// Note that the order of blocks is verified in block processing
let last_sent_slot = blocks[blocks.len() - 1].slot;
@ -328,83 +373,90 @@ impl<T: BeaconChainTypes> ImportManager<T> {
.add(MAX_BLOCKS_PER_REQUEST)
< last_sent_slot
{
warn!(self.log, "BeaconBlocks response returned out of range blocks";
"request_id" => request_id,
"response_initial_slot" => blocks[0].slot,
"requested_initial_slot" => block_requests.current_start_slot);
self.event_queue
.push(ImportManagerOutcome::DownvotePeer(peer_id));
// consider this sync failed
block_requests.state = BlockRequestsState::Failed;
return;
}
// Determine if more blocks need to be downloaded. There are a few cases:
// - We are in initial sync mode - We have requested blocks and need to determine if this
// is part of a known chain to determine whether to start syncing backwards or continue
// syncing forwards.
// - We are syncing backwards and need to verify if we have found a common ancestor in
// order to start processing the downloaded blocks.
// - We are syncing forwards. We mark this as complete and check if any further blocks are
// required to download when processing the batch.
match block_requests.sync_direction {
SyncDirection::Initial => {
block_requests.downloaded_blocks.append(&mut blocks);
// this batch is the first batch downloaded. Check if we can process or if we need
// to backwards search.
//TODO: Decide which is faster. Reading block from db and comparing or calculating
//the hash tree root and comparing.
let earliest_slot = block_requests.downloaded_blocks[0].slot;
if Some(block_requests.downloaded_blocks[0].canonical_root())
== chain.root_at_slot(earliest_slot)
{
// we have a common head, start processing and begin a forwards sync
block_requests.sync_direction = SyncDirection::Forwards;
block_requests.state = BlockRequestsState::ReadyToProcess;
return;
}
// no common head, begin a backwards search
block_requests.sync_direction = SyncDirection::Backwards;
block_requests.current_start_slot =
std::cmp::min(chain.best_slot(), block_requests.downloaded_blocks[0].slot);
block_requests.update_start_slot();
}
SyncDirection::Forwards => {
// continue processing all blocks forwards, verify the end in the processing
block_requests.downloaded_blocks.append(&mut blocks);
block_requests.state = BlockRequestsState::ReadyToProcess;
}
SyncDirection::Backwards => {
block_requests.downloaded_blocks.splice(..0, blocks);
// verify the request hasn't failed by having no common ancestor chain
// get our local finalized_slot
let local_finalized_slot = {
let state = &chain.head().beacon_state;
state
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch())
};
if local_finalized_slot >= block_requests.current_start_slot {
warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id);
block_requests.state = BlockRequestsState::Failed;
return;
}
// check if we have reached a common chain ancestor
let earliest_slot = block_requests.downloaded_blocks[0].slot;
if Some(block_requests.downloaded_blocks[0].canonical_root())
== chain.root_at_slot(earliest_slot)
{
// we have a common head, start processing and begin a forwards sync
block_requests.sync_direction = SyncDirection::Forwards;
block_requests.state = BlockRequestsState::ReadyToProcess;
return;
}
// no common chain, haven't passed last_finalized_head, so continue backwards
// search
block_requests.update_start_slot();
}
}
}
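Because the manager now holds only a `Weak` reference to the beacon chain, `beacon_blocks_response` (and the batch processing below) first upgrade it and bail out cleanly if the chain has been dropped. A minimal sketch of that guard; the helper name `with_chain` is hypothetical:

use std::sync::{Arc, Weak};

// Run `f` only while the chain is still alive; `None` means the client is shutting down.
fn with_chain<T: BeaconChainTypes, R>(
    chain: &Weak<BeaconChain<T>>,
    f: impl FnOnce(Arc<BeaconChain<T>>) -> R,
) -> Option<R> {
    chain.upgrade().map(f)
}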
pub fn recent_blocks_response(
@ -447,7 +499,7 @@ impl<T: BeaconChainTypes> ImportManager<T> {
}
// queue for processing
parent_request.state = BlockRequestsState::ReadyToProcess;
}
pub fn _inject_error(_peer_id: PeerId, _id: RequestId) {
@ -500,29 +552,41 @@ impl<T: BeaconChainTypes> ImportManager<T> {
pub(crate) fn poll(&mut self) -> ImportManagerOutcome {
loop {
//TODO: Optimize the lookups. Potentially keep state of whether each of these functions
//need to be called.
// only break once everything has been processed
let mut re_run = false;
// only process batch requests if there are any
if !self.import_queue.is_empty() {
// process potential block requests
self.process_potential_block_requests();
// process any complete long-range batches
re_run = self.process_complete_batches();
}
// only process parent objects if we are in regular sync
if let ManagerState::Regular = self.state {
// process any parent block lookup-requests
self.process_parent_requests();
// process any complete parent lookups
re_run = self.process_complete_parent_requests();
}
// return any queued events
if !self.event_queue.is_empty() {
let event = self.event_queue.remove(0);
self.event_queue.shrink_to_fit();
return event;
}
// update the state of the manager
self.update_state();
if !re_run {
break;
}
}
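`poll` no longer returns from inside each sub-step; instead the sub-steps push `ImportManagerOutcome`s onto `event_queue` and the loop drains one event per call. A simplified, self-contained sketch of that pattern (names and types are illustrative, not the crate's API):

// Illustrative event-queue poll loop.
struct Pump<E> {
    events: Vec<E>,
}

impl<E> Pump<E> {
    /// `drive` pushes any new outcomes and reports whether another pass could produce more.
    fn poll(&mut self, mut drive: impl FnMut(&mut Vec<E>) -> bool) -> Option<E> {
        loop {
            let re_run = drive(&mut self.events);
            if !self.events.is_empty() {
                // hand out queued events one at a time
                return Some(self.events.remove(0));
            }
            if !re_run {
                return None;
            }
        }
    }
}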
@ -549,11 +613,11 @@ impl<T: BeaconChainTypes> ImportManager<T> {
}
}
fn process_potential_block_requests(&mut self) {
// check if an outbound request is required
// Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p
// layer and not needed here. Therefore we create many outbound requests and let the RPC
// handle the number of simultaneous requests. Request all queued objects.
// remove any failed batches
let debug_log = &self.log;
@ -585,56 +649,84 @@ impl<T: BeaconChainTypes> ImportManager<T> {
count: MAX_BLOCKS_PER_REQUEST,
step: 0,
};
self.event_queue.push(ImportManagerOutcome::RequestBlocks {
peer_id: peer_id.clone(),
request,
request_id,
});
}
}
fn process_complete_batches(&mut self) -> bool {
// flag to indicate if the manager can be switched to idle or not
let mut re_run = false;
// create reference variables to be moved into subsequent closure
let chain_ref = self.chain.clone();
let log_ref = &self.log;
let event_queue_ref = &mut self.event_queue;
self.import_queue.retain(|peer_id, block_requests| {
// check that the chain still exists
if let Some(chain) = chain_ref.upgrade() {
let downloaded_blocks =
std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new());
let last_element = block_requests.downloaded_blocks.len() - 1;
let start_slot = block_requests.downloaded_blocks[0].slot;
let end_slot = block_requests.downloaded_blocks[last_element].slot;
match process_blocks(chain, downloaded_blocks, log_ref) {
Ok(()) => {
debug!(log_ref, "Blocks processed successfully";
"peer" => format!("{:?}", peer_id),
"start_slot" => start_slot,
"end_slot" => end_slot,
"no_blocks" => last_element + 1,
);
// check if the batch is complete, by verifying if we have reached the
// target head
if end_slot >= block_requests.target_head_slot {
// Completed, re-hello the peer to ensure we are up to the latest head
event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone()));
// remove the request
false
} else {
// have not reached the end, queue another batch
block_requests.update_start_slot();
re_run = true;
// keep the batch
true
}
}
Err(e) => {
warn!(log_ref, "Block processing failed";
"peer" => format!("{:?}", peer_id),
"start_slot" => start_slot,
"end_slot" => end_slot,
"no_blocks" => last_element + 1,
"error" => format!("{:?}", e),
);
event_queue_ref.push(ImportManagerOutcome::DownvotePeer(peer_id.clone()));
false
}
}
} else {
// chain no longer exists, empty the queue and return
event_queue_ref.clear();
return false;
}
});
re_run
}
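The rewritten `process_complete_batches` leans on `HashMap::retain`: the closure decides per peer whether the batch request stays alive, while outcomes are pushed onto a separately borrowed event queue. A reduced sketch of that shape, where `process`, `Outcome`, `events` and the field names are placeholders rather than the crate's real types:

// Illustrative keep/drop logic inside retain().
import_queue.retain(|peer_id, batch| match process(batch) {
    Ok(()) if batch.end_slot >= batch.target_head_slot => {
        events.push(Outcome::Hello(peer_id.clone()));
        false // batch finished: drop the request
    }
    Ok(()) => {
        batch.request_next_range();
        true // more blocks needed: keep the request
    }
    Err(_) => {
        events.push(Outcome::DownvotePeer(peer_id.clone()));
        false // misbehaving peer: drop the request
    }
});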
fn process_parent_requests(&mut self) {
// check to make sure there are peers to search for the parent from
if self.full_peers.is_empty() {
return;
}
// remove any failed requests
let debug_log = &self.log;
self.parent_queue.retain(|parent_request| {
@ -649,11 +741,6 @@ impl<T: BeaconChainTypes> ImportManager<T> {
}
});
// check if parents need to be searched for
for parent_request in self.parent_queue.iter_mut() {
if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE {
@ -677,23 +764,21 @@ impl<T: BeaconChainTypes> ImportManager<T> {
// select a random fully synced peer to attempt to download the parent block
let peer_id = self.full_peers.iter().next().expect("List is not empty");
self.event_queue
.push(ImportManagerOutcome::RecentRequest(peer_id.clone(), req));
}
}
}
fn process_complete_parent_requests(&mut self) -> bool {
// returned value indicating whether the manager can be switched to idle or not
let mut re_run = false;
// Find any parent_requests ready to be processed
for completed_request in self
.parent_queue
.iter_mut()
.filter(|req| req.state == BlockRequestsState::ReadyToProcess)
{
// verify the last added block is the parent of the last requested block
let last_index = completed_request.downloaded_blocks.len() - 1;
@ -711,7 +796,9 @@ impl<T: BeaconChainTypes> ImportManager<T> {
"received_block" => format!("{}", block_hash), "received_block" => format!("{}", block_hash),
"expected_parent" => format!("{}", expected_hash), "expected_parent" => format!("{}", expected_hash),
); );
return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); re_run = true;
self.event_queue
.push(ImportManagerOutcome::DownvotePeer(peer));
} }
// try and process the list of blocks up to the requested block // try and process the list of blocks up to the requested block
@ -720,154 +807,153 @@ impl<T: BeaconChainTypes> ImportManager<T> {
.downloaded_blocks
.pop()
.expect("Block must exist");
// check if the chain exists
if let Some(chain) = self.chain.upgrade() {
match chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => {
// need to keep looking for parents
completed_request.downloaded_blocks.push(block);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
break;
}
Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {}
Ok(outcome) => {
// it's a future slot or an invalid block, remove it and try again
completed_request.failed_attempts += 1;
trace!(
self.log, "Invalid parent block";
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", completed_request.last_submitted_peer),
);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
self.event_queue.push(ImportManagerOutcome::DownvotePeer(
completed_request.last_submitted_peer.clone(),
));
return re_run;
}
Err(e) => {
completed_request.failed_attempts += 1;
warn!(
self.log, "Parent processing error";
"error" => format!("{:?}", e)
);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
self.event_queue.push(ImportManagerOutcome::DownvotePeer(
completed_request.last_submitted_peer.clone(),
));
return re_run;
}
}
} else {
// chain doesn't exist - clear the event queue and return
self.event_queue.clear();
return false;
}
}
}
// remove any fully processed parent chains
self.parent_queue.retain(|req| {
if req.state == BlockRequestsState::ReadyToProcess {
false
} else {
true
}
});
re_run
}
}
// Helper function to process blocks
fn process_blocks<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
blocks: Vec<BeaconBlock<T::EthSpec>>,
log: &Logger,
) -> Result<(), String> {
for block in blocks {
let processing_result = chain.process_block(block.clone());
if let Ok(outcome) = processing_result {
match outcome {
BlockProcessingOutcome::Processed { block_root } => {
// The block was valid and we processed it successfully.
trace!(
log, "Imported block from network";
"slot" => block.slot,
"block_root" => format!("{}", block_root),
);
}
BlockProcessingOutcome::ParentUnknown { parent } => {
// blocks should be sequential and all parents should exist
trace!(
log, "ParentBlockUnknown";
"parent_root" => format!("{}", parent),
"baby_block_slot" => block.slot,
);
return Err(format!(
"Block at slot {} has an unknown parent.",
block.slot
));
}
BlockProcessingOutcome::FutureSlot {
present_slot,
block_slot,
} => {
if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot {
// The block is too far in the future, drop it.
trace!(
log, "FutureBlock";
"msg" => "block for future slot rejected, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
);
return Err(format!(
"Block at slot {} is too far in the future",
block.slot
));
} else {
// The block is in the future, but not too far.
trace!(
log, "QueuedFutureBlock";
"msg" => "queuing future block, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
);
}
}
BlockProcessingOutcome::FinalizedSlot => {
trace!(
log, "Finalized or earlier block processed";
"outcome" => format!("{:?}", outcome),
);
// block reached our finalized slot or was earlier, move to the next block
}
_ => {
trace!(
log, "InvalidBlock";
"msg" => "peer sent invalid block",
"outcome" => format!("{:?}", outcome),
);
return Err(format!("Invalid block at slot {}", block.slot));
}
}
} else {
trace!(
log, "BlockProcessingFailure";
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", processing_result)
);
return Err(format!(
"Unexpected block processing error: {:?}",
processing_result
));
}
}
Ok(())
}

View File

@ -6,7 +6,6 @@ use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::PeerId;
use slog::{debug, info, o, trace, warn};
use ssz::Encode;
use std::ops::Sub;
use std::sync::Arc;
use store::Store;
use tokio::sync::mpsc;
@ -190,7 +189,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
trace!(
self.log, "Out of date or potentially sync'd peer found";
"peer" => format!("{:?}", peer_id),
"remote_head_slot" => remote.head_slot,
"remote_latest_finalized_epoch" => remote.finalized_epoch,
);
@ -386,7 +385,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
"msg" => "Failed to return all requested hashes", "msg" => "Failed to return all requested hashes",
"start_slot" => req.start_slot, "start_slot" => req.start_slot,
"current_slot" => self.chain.present_slot(), "current_slot" => self.chain.best_slot(),
"requested" => req.count, "requested" => req.count,
"returned" => blocks.len(), "returned" => blocks.len(),
); );

View File

@ -33,14 +33,14 @@ fn main() {
.arg(
Arg::with_name("logfile")
.long("logfile")
.value_name("FILE")
.help("File path where output will be written.")
.takes_value(true),
)
.arg(
Arg::with_name("network-dir")
.long("network-dir")
.value_name("DIR")
.help("Data directory for network keys.")
.takes_value(true)
.global(true)
@ -83,7 +83,7 @@ fn main() {
Arg::with_name("boot-nodes") Arg::with_name("boot-nodes")
.long("boot-nodes") .long("boot-nodes")
.allow_hyphen_values(true) .allow_hyphen_values(true)
.value_name("BOOTNODES") .value_name("ENR-LIST")
.help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.")
.takes_value(true), .takes_value(true),
) )
@ -128,13 +128,14 @@ fn main() {
.arg(
Arg::with_name("rpc-address")
.long("rpc-address")
.value_name("ADDRESS")
.help("Listen address for RPC endpoint.")
.takes_value(true),
)
.arg(
Arg::with_name("rpc-port")
.long("rpc-port")
.value_name("PORT")
.help("Listen port for RPC endpoint.")
.conflicts_with("port-bump")
.takes_value(true),
@ -149,14 +150,14 @@ fn main() {
.arg(
Arg::with_name("api-address")
.long("api-address")
.value_name("ADDRESS")
.help("Set the listen address for the RESTful HTTP API server.")
.takes_value(true),
)
.arg(
Arg::with_name("api-port")
.long("api-port")
.value_name("PORT")
.help("Set the listen TCP port for the RESTful HTTP API server.")
.conflicts_with("port-bump")
.takes_value(true),
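The `value_name` changes make every placeholder a short uppercase noun so that clap renders consistent usage strings. A hedged clap 2.x sketch of the convention; the flag itself is made up:

// With value_name("PORT"), --help shows: --example-port <PORT>
.arg(
    Arg::with_name("example-port")
        .long("example-port")
        .value_name("PORT")
        .help("Listen port for the example service.")
        .takes_value(true),
)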