Activate clippy::manual_let_else lint (#4889)

## Issue Addressed

#4888

## Proposed Changes

Enabled the `clippy::manual_let_else` lint and resolved the resulting warnings.
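For context, `clippy::manual_let_else` flags code that unwraps a pattern through a `match` or `if let`/`else` and suggests the `let ... else` form instead (stable since Rust 1.65). A minimal sketch of the rewrite this PR applies throughout, using a hypothetical `find_user` helper for illustration only:

```rust
// Hypothetical helper, for illustration only.
fn find_user(id: u64) -> Option<String> {
    (id == 1).then(|| "alice".to_string())
}

fn greet(id: u64) -> Option<String> {
    // Before: the manual unwrap-or-return that the lint flags.
    //
    //     let user = match find_user(id) {
    //         Some(user) => user,
    //         None => return None,
    //     };
    //
    // After: the `let ... else` form clippy suggests. The else-block
    // must diverge (return, break, continue, or panic).
    let Some(user) = find_user(id) else {
        return None;
    };
    Some(format!("Hello, {user}!"))
}

fn main() {
    assert_eq!(greet(1).as_deref(), Some("Hello, alice!"));
    assert_eq!(greet(2), None);
}
```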
Parent: a9f9dc241d
Commit: 4ce01ddd11

`Makefile` (+1 line):
```diff
@@ -208,6 +208,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 lint:
 	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \
 		-D clippy::fn_to_numeric_cast_any \
+		-D clippy::manual_let_else \
 		-D warnings \
 		-A clippy::derive_partial_eq_without_eq \
 		-A clippy::from-over-into \
```
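With this change, the new lint is denied alongside the existing ones, so a local `make lint` run (assuming GNU make and a Rust toolchain with clippy installed) now fails on any remaining manual let-else patterns.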
The remaining hunks apply the `let ... else` rewrite across the codebase:

```diff
@@ -601,10 +601,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         spec: &ChainSpec,
         log: &Logger,
     ) -> Result<Option<BeaconForkChoice<T>>, Error> {
-        let persisted_fork_choice =
-            match store.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)? {
-                Some(fc) => fc,
-                None => return Ok(None),
-            };
+        let Some(persisted_fork_choice) =
+            store.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)?
+        else {
+            return Ok(None);
+        };

         let fc_store =
@@ -3485,9 +3485,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         state: &BeaconState<T::EthSpec>,
     ) -> Result<(), BlockError<T::EthSpec>> {
         // Only perform the weak subjectivity check if it was configured.
-        let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint {
-            checkpoint
-        } else {
+        let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else {
            return Ok(());
        };
        // Note: we're using the finalized checkpoint from the head state, rather than fork
@@ -5336,10 +5334,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         )
         .await??;

-        let (forkchoice_update_params, pre_payload_attributes) =
-            if let Some((fcu, Some(pre_payload))) = maybe_prep_data {
-                (fcu, pre_payload)
-            } else {
+        let Some((forkchoice_update_params, Some(pre_payload_attributes))) = maybe_prep_data else {
            // Appropriate log messages have already been logged above and in
            // `get_pre_payload_attributes`.
            return Ok(());
@@ -5436,10 +5431,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             }
         }

-        let till_prepare_slot =
-            if let Some(duration) = self.slot_clock.duration_to_slot(prepare_slot) {
-                duration
-            } else {
+        let Some(till_prepare_slot) = self.slot_clock.duration_to_slot(prepare_slot) else {
            // `SlotClock::duration_to_slot` will return `None` when we are past the start
            // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case,
            // it's too late.
```
```diff
@@ -451,23 +451,21 @@ async fn availability_cache_maintenance_service<T: BeaconChainTypes>(
         let additional_delay = (epoch_duration * 3) / 4;
         tokio::time::sleep(duration + additional_delay).await;

-        let deneb_fork_epoch = match chain.spec.deneb_fork_epoch {
-            Some(epoch) => epoch,
-            None => break, // shutdown service if deneb fork epoch not set
+        let Some(deneb_fork_epoch) = chain.spec.deneb_fork_epoch else {
+            // shutdown service if deneb fork epoch not set
+            break;
         };

         debug!(
             chain.log,
             "Availability cache maintenance service firing";
         );

-        let current_epoch = match chain
+        let Some(current_epoch) = chain
             .slot_clock
             .now()
             .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
-        {
-            Some(epoch) => epoch,
-            None => continue, // we'll have to try again next time I suppose..
+        else {
+            continue;
         };

         if current_epoch < deneb_fork_epoch {
@@ -547,9 +547,8 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
             .peek_lru()
             .map(|(key, value)| (*key, value.clone()));

-        let (lru_root, lru_pending_components) = match lru_entry {
-            Some((r, p)) => (r, p),
-            None => break,
+        let Some((lru_root, lru_pending_components)) = lru_entry else {
+            break;
         };

         if lru_pending_components
@@ -605,9 +604,8 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         let delete_if_outdated = |cache: &OverflowLRUCache<T>,
                                   block_data: Option<BlockData>|
          -> Result<(), AvailabilityCheckError> {
-            let block_data = match block_data {
-                Some(block_data) => block_data,
-                None => return Ok(()),
+            let Some(block_data) = block_data else {
+                return Ok(());
             };
             let not_in_store_keys = !cache.critical.read().store_keys.contains(&block_data.root);
             if not_in_store_keys {
```
```diff
@@ -99,9 +99,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
         spec: &ChainSpec,
     ) -> Result<Option<Attestation<E>>, Error> {
         let lock = self.item.read();
-        let item = if let Some(item) = lock.as_ref() {
-            item
-        } else {
+        let Some(item) = lock.as_ref() else {
            return Ok(None);
        };

```
```diff
@@ -16,15 +16,14 @@ pub fn upgrade_to_v12<T: BeaconChainTypes>(
     let spec = db.get_chain_spec();

     // Load a V5 op pool and transform it to V12.
-    let PersistedOperationPoolV5 {
+    let Some(PersistedOperationPoolV5 {
         attestations_v5,
         sync_contributions,
         attester_slashings_v5,
         proposer_slashings_v5,
         voluntary_exits_v5,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
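The op-pool migration hunks, like the one above, combine `let ... else` with a full struct destructuring inside `Some(..)`. A minimal self-contained sketch of that pattern (the `PersistedPool` type and `load_pool` helper are hypothetical, for illustration only):

```rust
struct PersistedPool {
    attestations: Vec<u64>,
    exits: Vec<u64>,
}

// Hypothetical lookup standing in for `db.get_item(..)?`.
fn load_pool(found: bool) -> Option<PersistedPool> {
    found.then(|| PersistedPool {
        attestations: vec![1],
        exits: vec![],
    })
}

fn migrate(found: bool) -> Vec<u64> {
    // Destructure the struct's fields directly in the `let ... else`
    // pattern and bail out early when nothing is stored.
    let Some(PersistedPool {
        attestations,
        exits,
    }) = load_pool(found)
    else {
        return vec![];
    };
    attestations.into_iter().chain(exits).collect()
}

fn main() {
    assert_eq!(migrate(true), vec![1]);
    assert!(migrate(false).is_empty());
}
```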
```diff
@@ -168,15 +167,14 @@ pub fn downgrade_from_v12<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V5.
-    let PersistedOperationPoolV12::<T::EthSpec> {
+    let Some(PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v12
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
```diff
@@ -18,16 +18,11 @@ fn get_slot_clock<T: BeaconChainTypes>(
     log: &Logger,
 ) -> Result<Option<T::SlotClock>, Error> {
     let spec = db.get_chain_spec();
-    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
-        block
-    } else {
+    let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else {
         error!(log, "Missing genesis block");
         return Ok(None);
     };
-    let genesis_state =
-        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
-            state
-        } else {
+    let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else {
        error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
        return Ok(None);
    };
```
```diff
@@ -43,15 +38,14 @@ pub fn upgrade_to_v14<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V14.
-    let PersistedOperationPoolV12::<T::EthSpec> {
+    let Some(PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v12
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
```diff
@@ -94,16 +88,15 @@ pub fn downgrade_from_v14<T: BeaconChainTypes>(
     }

     // Load a V14 op pool and transform it to V12.
-    let PersistedOperationPoolV14::<T::EthSpec> {
+    let Some(PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
```diff
@@ -11,16 +11,15 @@ pub fn upgrade_to_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V14 op pool and transform it to V15.
-    let PersistedOperationPoolV14::<T::EthSpec> {
+    let Some(PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v14
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
```diff
@@ -43,7 +42,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V15 op pool and transform it to V14.
-    let PersistedOperationPoolV15::<T::EthSpec> {
+    let Some(PersistedOperationPoolV15::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
@@ -51,9 +50,8 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
         voluntary_exits,
         bls_to_execution_changes,
         capella_bls_change_broadcast_indices,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
```
```diff
@@ -17,16 +17,11 @@ fn get_slot_clock<T: BeaconChainTypes>(
     log: &Logger,
 ) -> Result<Option<T::SlotClock>, Error> {
     let spec = db.get_chain_spec();
-    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
-        block
-    } else {
+    let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else {
         error!(log, "Missing genesis block");
         return Ok(None);
     };
-    let genesis_state =
-        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
-            state
-        } else {
+    let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else {
        error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
        return Ok(None);
    };
```
```diff
@@ -113,14 +113,11 @@ async fn state_advance_timer<T: BeaconChainTypes>(
     let slot_duration = slot_clock.slot_duration();

     loop {
-        let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() {
-            Some(duration) => duration,
-            None => {
+        let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else {
             error!(log, "Failed to read slot clock");
             // If we can't read the slot clock, just wait another slot.
             sleep(slot_duration).await;
             continue;
-            }
         };

         // Run the state advance 3/4 of the way through the slot (9s on mainnet).
```
```diff
@@ -1799,13 +1799,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
             };
         }

-        let block = if let Some(block) = engine
+        let Some(block) = engine
             .api
             .get_block_by_hash_with_txns::<T>(hash, fork)
             .await?
-        {
-            block
-        } else {
+        else {
            return Ok(None);
        };

```
```diff
@@ -426,9 +426,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
     }

     pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 {
-        let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) {
-            parent
-        } else {
+        let Some(parent) = self.blocks.get(&payload.parent_hash()) else {
            return PayloadStatusV1 {
                status: PayloadStatusV1Status::Syncing,
                latest_valid_hash: None,
```
```diff
@@ -30,9 +30,7 @@ pub fn sync_committee_duties<T: BeaconChainTypes>(
     request_indices: &[u64],
     chain: &BeaconChain<T>,
 ) -> Result<SyncDuties, warp::reject::Rejection> {
-    let altair_fork_epoch = if let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch {
-        altair_fork_epoch
-    } else {
+    let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch else {
        // Empty response for networks with Altair disabled.
        return Ok(convert_to_response(vec![], false));
    };
```
```diff
@@ -135,9 +135,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
         if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 {
             return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2())));
         }
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
         };

         // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -277,9 +276,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
                 return Ok(None);
             }
         }
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
         };

         // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -324,9 +322,8 @@ impl<TSpec: EthSpec> OutboundCodec<OutboundRequest<TSpec>> for SSZSnappyOutbound
         &mut self,
         src: &mut BytesMut,
     ) -> Result<Option<Self::CodecErrorType>, RPCError> {
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
         };

         // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
```
```diff
@@ -286,9 +286,7 @@ where
     // wrong state a response will fail silently.
     fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse<TSpec>) {
         // check if the stream matching the response still exists
-        let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
-            info
-        } else {
+        let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else {
             if !matches!(response, RPCCodedResponse::StreamTermination(..)) {
                 // the stream is closed after sending the expected number of responses
                 trace!(self.log, "Inbound stream has expired. Response not sent";
@@ -296,7 +294,6 @@ where
             }
             return;
         };

         // If the response we are sending is an error, report back for handling
         if let RPCCodedResponse::Error(ref code, ref reason) = response {
             self.events_out.push(Err(HandlerErr::Inbound {
```
|
@ -205,9 +205,8 @@ impl GossipCache {
|
|||||||
GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
|
GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
|
||||||
GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
|
GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
|
||||||
};
|
};
|
||||||
let expire_timeout = match expire_timeout {
|
let Some(expire_timeout) = expire_timeout else {
|
||||||
Some(expire_timeout) => expire_timeout,
|
return;
|
||||||
None => return,
|
|
||||||
};
|
};
|
||||||
match self
|
match self
|
||||||
.topic_msgs
|
.topic_msgs
|
||||||
|
@ -350,9 +350,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) {
|
let Ok(bootstrap) = LightClientBootstrap::from_beacon_state(&mut beacon_state) else {
|
||||||
Ok(bootstrap) => bootstrap,
|
|
||||||
Err(_) => {
|
|
||||||
self.send_error_response(
|
self.send_error_response(
|
||||||
peer_id,
|
peer_id,
|
||||||
RPCResponseErrorCode::ResourceUnavailable,
|
RPCResponseErrorCode::ResourceUnavailable,
|
||||||
@ -360,7 +358,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
|||||||
request_id,
|
request_id,
|
||||||
);
|
);
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
};
|
};
|
||||||
self.send_response(
|
self.send_response(
|
||||||
peer_id,
|
peer_id,
|
||||||
|
@ -115,9 +115,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
|||||||
duplicate_cache: DuplicateCache,
|
duplicate_cache: DuplicateCache,
|
||||||
) {
|
) {
|
||||||
// Check if the block is already being imported through another source
|
// Check if the block is already being imported through another source
|
||||||
let handle = match duplicate_cache.check_and_insert(block_root) {
|
let Some(handle) = duplicate_cache.check_and_insert(block_root) else {
|
||||||
Some(handle) => handle,
|
|
||||||
None => {
|
|
||||||
debug!(
|
debug!(
|
||||||
self.log,
|
self.log,
|
||||||
"Gossip block is being processed";
|
"Gossip block is being processed";
|
||||||
@ -142,7 +140,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
|||||||
error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
|
error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
|
||||||
};
|
};
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Returns `true` if the time now is after the 4s attestation deadline.
|
// Returns `true` if the time now is after the 4s attestation deadline.
|
||||||
|
@ -509,16 +509,13 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
|
|||||||
return Ok(ProcessResult::Successful);
|
return Ok(ProcessResult::Successful);
|
||||||
}
|
}
|
||||||
|
|
||||||
let batch = match self.batches.get_mut(&batch_id) {
|
let Some(batch) = self.batches.get_mut(&batch_id) else {
|
||||||
Some(batch) => batch,
|
|
||||||
None => {
|
|
||||||
return self
|
return self
|
||||||
.fail_sync(BackFillError::InvalidSyncState(format!(
|
.fail_sync(BackFillError::InvalidSyncState(format!(
|
||||||
"Trying to process a batch that does not exist: {}",
|
"Trying to process a batch that does not exist: {}",
|
||||||
batch_id
|
batch_id
|
||||||
)))
|
)))
|
||||||
.map(|_| ProcessResult::Successful);
|
.map(|_| ProcessResult::Successful);
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// NOTE: We send empty batches to the processor in order to trigger the block processor
|
// NOTE: We send empty batches to the processor in order to trigger the block processor
|
||||||
@ -909,9 +906,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
|
|||||||
network: &mut SyncNetworkContext<T>,
|
network: &mut SyncNetworkContext<T>,
|
||||||
batch_id: BatchId,
|
batch_id: BatchId,
|
||||||
) -> Result<(), BackFillError> {
|
) -> Result<(), BackFillError> {
|
||||||
let batch = match self.batches.get_mut(&batch_id) {
|
let Some(batch) = self.batches.get_mut(&batch_id) else {
|
||||||
Some(batch) => batch,
|
return Ok(());
|
||||||
None => return Ok(()),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Find a peer to request the batch
|
// Find a peer to request the batch
|
||||||
|
@ -1015,15 +1015,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
|||||||
BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_))
|
BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_))
|
||||||
| BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
|
| BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
|
||||||
// Check if the beacon processor is available
|
// Check if the beacon processor is available
|
||||||
let beacon_processor = match cx.beacon_processor_if_enabled() {
|
let Some(beacon_processor) = cx.beacon_processor_if_enabled() else {
|
||||||
Some(beacon_processor) => beacon_processor,
|
|
||||||
None => {
|
|
||||||
return trace!(
|
return trace!(
|
||||||
self.log,
|
self.log,
|
||||||
"Dropping parent chain segment that was ready for processing.";
|
"Dropping parent chain segment that was ready for processing.";
|
||||||
parent_lookup
|
parent_lookup
|
||||||
);
|
);
|
||||||
}
|
|
||||||
};
|
};
|
||||||
let (chain_hash, blocks, hashes, block_request) =
|
let (chain_hash, blocks, hashes, block_request) =
|
||||||
parent_lookup.parts_for_processing();
|
parent_lookup.parts_for_processing();
|
||||||
@ -1195,11 +1192,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
|
|||||||
result: BatchProcessResult,
|
result: BatchProcessResult,
|
||||||
cx: &SyncNetworkContext<T>,
|
cx: &SyncNetworkContext<T>,
|
||||||
) {
|
) {
|
||||||
let request = match self.processing_parent_lookups.remove(&chain_hash) {
|
let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else {
|
||||||
Some((_hashes, request)) => request,
|
return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result);
|
||||||
None => {
|
|
||||||
return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result)
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result);
|
debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result);
|
||||||
|
@ -294,19 +294,15 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
return Ok(KeepChain);
|
return Ok(KeepChain);
|
||||||
}
|
}
|
||||||
|
|
||||||
let beacon_processor = match network.beacon_processor_if_enabled() {
|
let Some(beacon_processor) = network.beacon_processor_if_enabled() else {
|
||||||
Some(beacon_processor) => beacon_processor,
|
return Ok(KeepChain);
|
||||||
None => return Ok(KeepChain),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let batch = match self.batches.get_mut(&batch_id) {
|
let Some(batch) = self.batches.get_mut(&batch_id) else {
|
||||||
Some(batch) => batch,
|
|
||||||
None => {
|
|
||||||
return Err(RemoveChain::WrongChainState(format!(
|
return Err(RemoveChain::WrongChainState(format!(
|
||||||
"Trying to process a batch that does not exist: {}",
|
"Trying to process a batch that does not exist: {}",
|
||||||
batch_id
|
batch_id
|
||||||
)));
|
)));
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// NOTE: We send empty batches to the processor in order to trigger the block processor
|
// NOTE: We send empty batches to the processor in order to trigger the block processor
|
||||||
@ -874,9 +870,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
network: &mut SyncNetworkContext<T>,
|
network: &mut SyncNetworkContext<T>,
|
||||||
batch_id: BatchId,
|
batch_id: BatchId,
|
||||||
) -> ProcessingResult {
|
) -> ProcessingResult {
|
||||||
let batch = match self.batches.get_mut(&batch_id) {
|
let Some(batch) = self.batches.get_mut(&batch_id) else {
|
||||||
Some(batch) => batch,
|
return Ok(KeepChain);
|
||||||
None => return Ok(KeepChain),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Find a peer to request the batch
|
// Find a peer to request the batch
|
||||||
|
@ -432,9 +432,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Load the blinded block.
|
// Load the blinded block.
|
||||||
let blinded_block = match self.get_blinded_block(block_root)? {
|
let Some(blinded_block) = self.get_blinded_block(block_root)? else {
|
||||||
Some(block) => block,
|
return Ok(None);
|
||||||
None => return Ok(None),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// If the block is after the split point then we should have the full execution payload
|
// If the block is after the split point then we should have the full execution payload
|
||||||
@ -2053,12 +2052,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
|||||||
|
|
||||||
/// Try to prune blobs, approximating the current epoch from the split slot.
|
/// Try to prune blobs, approximating the current epoch from the split slot.
|
||||||
pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> {
|
pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> {
|
||||||
let deneb_fork_epoch = match self.spec.deneb_fork_epoch {
|
let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else {
|
||||||
Some(epoch) => epoch,
|
|
||||||
None => {
|
|
||||||
debug!(self.log, "Deneb fork is disabled");
|
debug!(self.log, "Deneb fork is disabled");
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
|
||||||
};
|
};
|
||||||
// The current epoch is >= split_epoch + 2. It could be greater if the database is
|
// The current epoch is >= split_epoch + 2. It could be greater if the database is
|
||||||
// configured to delay updating the split or finalization has ceased. In this instance we
|
// configured to delay updating the split or finalization has ceased. In this instance we
|
||||||
|
@ -17,9 +17,7 @@ where
|
|||||||
Cold: ItemStore<E>,
|
Cold: ItemStore<E>,
|
||||||
{
|
{
|
||||||
pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> {
|
pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> {
|
||||||
let mut anchor = if let Some(anchor) = self.get_anchor_info() {
|
let Some(mut anchor) = self.get_anchor_info() else {
|
||||||
anchor
|
|
||||||
} else {
|
|
||||||
// Nothing to do, history is complete.
|
// Nothing to do, history is complete.
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
@ -16,12 +16,10 @@ pub fn spawn_timer<T: BeaconChainTypes>(
|
|||||||
let log = executor.log().clone();
|
let log = executor.log().clone();
|
||||||
let timer_future = async move {
|
let timer_future = async move {
|
||||||
loop {
|
loop {
|
||||||
let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() {
|
let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot()
|
||||||
Some(duration) => duration,
|
else {
|
||||||
None => {
|
|
||||||
warn!(log, "Unable to determine duration to next slot");
|
warn!(log, "Unable to determine duration to next slot");
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
sleep(duration_to_next_slot).await;
|
sleep(duration_to_next_slot).await;
|
||||||
|
@ -19,19 +19,16 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream {
|
|||||||
let name = &item.ident;
|
let name = &item.ident;
|
||||||
let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
|
let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
|
||||||
|
|
||||||
let struct_data = match &item.data {
|
let syn::Data::Struct(struct_data) = &item.data else {
|
||||||
syn::Data::Struct(s) => s,
|
panic!("compare_fields_derive only supports structs.");
|
||||||
_ => panic!("compare_fields_derive only supports structs."),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut quotes = vec![];
|
let mut quotes = vec![];
|
||||||
|
|
||||||
for field in struct_data.fields.iter() {
|
for field in struct_data.fields.iter() {
|
||||||
let ident_a = match &field.ident {
|
let Some(ident_a) = &field.ident else {
|
||||||
Some(ref ident) => ident,
|
panic!("compare_fields_derive only supports named struct fields.");
|
||||||
_ => panic!("compare_fields_derive only supports named struct fields."),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let field_name = ident_a.to_string();
|
let field_name = ident_a.to_string();
|
||||||
let ident_b = ident_a.clone();
|
let ident_b = ident_a.clone();
|
||||||
|
|
||||||
|
@ -916,9 +916,8 @@ impl BeaconNodeHttpClient {
|
|||||||
Error,
|
Error,
|
||||||
> {
|
> {
|
||||||
let path = self.get_beacon_blocks_path(block_id)?;
|
let path = self.get_beacon_blocks_path(block_id)?;
|
||||||
let response = match self.get_response(path, |b| b).await.optional()? {
|
let Some(response) = self.get_response(path, |b| b).await.optional()? else {
|
||||||
Some(res) => res,
|
return Ok(None);
|
||||||
None => return Ok(None),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Some(response.json().await?))
|
Ok(Some(response.json().await?))
|
||||||
@ -932,9 +931,8 @@ impl BeaconNodeHttpClient {
|
|||||||
block_id: BlockId,
|
block_id: BlockId,
|
||||||
) -> Result<Option<GenericResponse<BlobSidecarList<T>>>, Error> {
|
) -> Result<Option<GenericResponse<BlobSidecarList<T>>>, Error> {
|
||||||
let path = self.get_blobs_path(block_id)?;
|
let path = self.get_blobs_path(block_id)?;
|
||||||
let response = match self.get_response(path, |b| b).await.optional()? {
|
let Some(response) = self.get_response(path, |b| b).await.optional()? else {
|
||||||
Some(res) => res,
|
return Ok(None);
|
||||||
None => return Ok(None),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Some(response.json().await?))
|
Ok(Some(response.json().await?))
|
||||||
@ -951,9 +949,8 @@ impl BeaconNodeHttpClient {
|
|||||||
Error,
|
Error,
|
||||||
> {
|
> {
|
||||||
let path = self.get_beacon_blinded_blocks_path(block_id)?;
|
let path = self.get_beacon_blinded_blocks_path(block_id)?;
|
||||||
let response = match self.get_response(path, |b| b).await.optional()? {
|
let Some(response) = self.get_response(path, |b| b).await.optional()? else {
|
||||||
Some(res) => res,
|
return Ok(None);
|
||||||
None => return Ok(None),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Some(response.json().await?))
|
Ok(Some(response.json().await?))
|
||||||
|
@ -20,9 +20,8 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream {
|
|||||||
let name = &derived_input.ident;
|
let name = &derived_input.ident;
|
||||||
let (impl_generics, ty_generics, where_clause) = &derived_input.generics.split_for_impl();
|
let (impl_generics, ty_generics, where_clause) = &derived_input.generics.split_for_impl();
|
||||||
|
|
||||||
let struct_data = match &derived_input.data {
|
let syn::Data::Struct(struct_data) = &derived_input.data else {
|
||||||
syn::Data::Struct(s) => s,
|
panic!("test_random_derive only supports structs.");
|
||||||
_ => panic!("test_random_derive only supports structs."),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Build quotes for fields that should be generated and those that should be built from
|
// Build quotes for fields that should be generated and those that should be built from
|
||||||
|
@ -1035,13 +1035,11 @@ impl ProtoArray {
|
|||||||
.epoch
|
.epoch
|
||||||
.start_slot(E::slots_per_epoch());
|
.start_slot(E::slots_per_epoch());
|
||||||
|
|
||||||
let mut node = if let Some(node) = self
|
let Some(mut node) = self
|
||||||
.indices
|
.indices
|
||||||
.get(&root)
|
.get(&root)
|
||||||
.and_then(|index| self.nodes.get(*index))
|
.and_then(|index| self.nodes.get(*index))
|
||||||
{
|
else {
|
||||||
node
|
|
||||||
} else {
|
|
||||||
// An unknown root is not a finalized descendant. This line can only
|
// An unknown root is not a finalized descendant. This line can only
|
||||||
// be reached if the user supplies a root that is not known to fork
|
// be reached if the user supplies a root that is not known to fork
|
||||||
// choice.
|
// choice.
|
||||||
|
@ -99,9 +99,8 @@ pub fn verify_signature_sets<'a>(
|
|||||||
|
|
||||||
// Aggregate all the public keys.
|
// Aggregate all the public keys.
|
||||||
// Public keys have already been checked for subgroup and infinity
|
// Public keys have already been checked for subgroup and infinity
|
||||||
let agg_pk = match blst_core::AggregatePublicKey::aggregate(&signing_keys, false) {
|
let Ok(agg_pk) = blst_core::AggregatePublicKey::aggregate(&signing_keys, false) else {
|
||||||
Ok(agg_pk) => agg_pk,
|
return false;
|
||||||
Err(_) => return false,
|
|
||||||
};
|
};
|
||||||
pks.push(agg_pk.to_public_key());
|
pks.push(agg_pk.to_public_key());
|
||||||
}
|
}
|
||||||
|
@ -254,12 +254,9 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Disable file logging if no path is specified.
|
// Disable file logging if no path is specified.
|
||||||
let path = match config.path {
|
let Some(path) = config.path else {
|
||||||
Some(path) => path,
|
|
||||||
None => {
|
|
||||||
self.log = Some(stdout_logger);
|
self.log = Some(stdout_logger);
|
||||||
return Ok(self);
|
return Ok(self);
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Ensure directories are created becfore the logfile.
|
// Ensure directories are created becfore the logfile.
|
||||||
|
@ -159,9 +159,8 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn
|
|||||||
config: &Config,
|
config: &Config,
|
||||||
) -> Result<Option<Self>, Error> {
|
) -> Result<Option<Self>, Error> {
|
||||||
let disk_key = config.disk_key(validator_chunk_index, chunk_index);
|
let disk_key = config.disk_key(validator_chunk_index, chunk_index);
|
||||||
let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? {
|
let Some(chunk_bytes) = txn.get(Self::select_db(db), &disk_key.to_be_bytes())? else {
|
||||||
Some(chunk_bytes) => chunk_bytes,
|
return Ok(None);
|
||||||
None => return Ok(None),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?;
|
let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?;
|
||||||
@ -448,11 +447,9 @@ pub fn apply_attestation_for_validator<E: EthSpec, T: TargetArrayChunk>(
|
|||||||
return Ok(slashing_status);
|
return Ok(slashing_status);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut start_epoch = if let Some(start_epoch) =
|
let Some(mut start_epoch) =
|
||||||
T::first_start_epoch(attestation.data.source.epoch, current_epoch, config)
|
T::first_start_epoch(attestation.data.source.epoch, current_epoch, config)
|
||||||
{
|
else {
|
||||||
start_epoch
|
|
||||||
} else {
|
|
||||||
return Ok(slashing_status);
|
return Ok(slashing_status);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -536,10 +533,8 @@ pub fn epoch_update_for_validator<E: EthSpec, T: TargetArrayChunk>(
|
|||||||
current_epoch: Epoch,
|
current_epoch: Epoch,
|
||||||
config: &Config,
|
config: &Config,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let previous_current_epoch =
|
let Some(previous_current_epoch) = db.get_current_epoch_for_validator(validator_index, txn)?
|
||||||
if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? {
|
else {
|
||||||
epoch
|
|
||||||
} else {
|
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -51,13 +51,10 @@ impl<E: EthSpec> Case for MerkleProofValidity<E> {
|
|||||||
fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
|
fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
|
||||||
let mut state = self.state.clone();
|
let mut state = self.state.clone();
|
||||||
state.initialize_tree_hash_cache();
|
state.initialize_tree_hash_cache();
|
||||||
let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) {
|
let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else {
|
||||||
Ok(proof) => proof,
|
|
||||||
Err(_) => {
|
|
||||||
return Err(Error::FailedToParseTest(
|
return Err(Error::FailedToParseTest(
|
||||||
"Could not retrieve merkle proof".to_string(),
|
"Could not retrieve merkle proof".to_string(),
|
||||||
))
|
));
|
||||||
}
|
|
||||||
};
|
};
|
||||||
let proof_len = proof.len();
|
let proof_len = proof.len();
|
||||||
let branch_len = self.merkle_proof.branch.len();
|
let branch_len = self.merkle_proof.branch.len();
|
||||||
|
@ -525,9 +525,7 @@ impl DoppelgangerService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Resolve the index from the server response back to a public key.
|
// Resolve the index from the server response back to a public key.
|
||||||
let pubkey = if let Some(pubkey) = indices_map.get(&response.index) {
|
let Some(pubkey) = indices_map.get(&response.index) else {
|
||||||
pubkey
|
|
||||||
} else {
|
|
||||||
crit!(
|
crit!(
|
||||||
self.log,
|
self.log,
|
||||||
"Inconsistent indices map";
|
"Inconsistent indices map";
|
||||||
|
@ -607,9 +607,7 @@ pub async fn fill_in_aggregation_proofs<T: SlotClock + 'static, E: EthSpec>(
|
|||||||
|
|
||||||
// Add to global storage (we add regularly so the proofs can be used ASAP).
|
// Add to global storage (we add regularly so the proofs can be used ASAP).
|
||||||
let sync_map = duties_service.sync_duties.committees.read();
|
let sync_map = duties_service.sync_duties.committees.read();
|
||||||
let committee_duties = if let Some(duties) = sync_map.get(&sync_committee_period) {
|
let Some(committee_duties) = sync_map.get(&sync_committee_period) else {
|
||||||
duties
|
|
||||||
} else {
|
|
||||||
debug!(
|
debug!(
|
||||||
log,
|
log,
|
||||||
"Missing sync duties";
|
"Missing sync duties";
|
||||||
|
@ -158,13 +158,11 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> {
|
|||||||
.checked_sub(slot_duration / 3)
|
.checked_sub(slot_duration / 3)
|
||||||
.unwrap_or_else(|| Duration::from_secs(0));
|
.unwrap_or_else(|| Duration::from_secs(0));
|
||||||
|
|
||||||
let slot_duties = if let Some(duties) = self
|
let Some(slot_duties) = self
|
||||||
.duties_service
|
.duties_service
|
||||||
.sync_duties
|
.sync_duties
|
||||||
.get_duties_for_slot::<E>(slot, &self.duties_service.spec)
|
.get_duties_for_slot::<E>(slot, &self.duties_service.spec)
|
||||||
{
|
else {
|
||||||
duties
|
|
||||||
} else {
|
|
||||||
debug!(log, "No duties known for slot {}", slot);
|
debug!(log, "No duties known for slot {}", slot);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|