# Fix some typos (#3376)

## Proposed Changes

This PR fixes various minor typos in the project.
Author: Justin Traglia
Date: 2022-07-27 00:51:06 +00:00
Parent: 44fae52cd7
Commit: 0f62d900fe
16 changed files with 25 additions and 25 deletions


@@ -86,7 +86,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
     ///
     /// - There is a cache `item` present.
     /// - If `request_slot` is in the same epoch as `item.epoch`.
-    /// - If `request_index` does not exceed `item.comittee_count`.
+    /// - If `request_index` does not exceed `item.committee_count`.
     pub fn try_attest(
         &self,
         request_slot: Slot,
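
The doc comment above lists three conditions for serving an attestation from the early attester cache. Below is a minimal, self-contained sketch of those checks; it is not the Lighthouse implementation, and the plain `u64` types and the mainnet value of `SLOTS_PER_EPOCH` are assumptions for illustration only.

```rust
// Assumption: mainnet preset of 32 slots per epoch.
const SLOTS_PER_EPOCH: u64 = 32;

struct CacheItem {
    epoch: u64,
    committee_count: u64,
}

// Serve from the cache only when an item exists, the requested slot falls in
// the cached epoch, and the requested committee index is within the cached
// committee count ("does not exceed" is read here as a strict index bound).
fn can_serve(item: Option<&CacheItem>, request_slot: u64, request_index: u64) -> bool {
    match item {
        Some(item) => {
            request_slot / SLOTS_PER_EPOCH == item.epoch
                && request_index < item.committee_count
        }
        None => false,
    }
}

fn main() {
    let item = CacheItem { epoch: 3, committee_count: 4 };
    assert!(can_serve(Some(&item), 3 * SLOTS_PER_EPOCH + 5, 2)); // same epoch, valid index
    assert!(!can_serve(Some(&item), 4 * SLOTS_PER_EPOCH, 2)); // wrong epoch
    assert!(!can_serve(None, 3 * SLOTS_PER_EPOCH, 0)); // no cached item
}
```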


@@ -30,9 +30,9 @@ pub struct GossipCache {
     proposer_slashing: Option<Duration>,
     /// Timeout for attester slashings.
     attester_slashing: Option<Duration>,
-    /// Timeout for aggregated sync commitee signatures.
+    /// Timeout for aggregated sync committee signatures.
     signed_contribution_and_proof: Option<Duration>,
-    /// Timeout for sync commitee messages.
+    /// Timeout for sync committee messages.
     sync_committee_message: Option<Duration>,
 }
@@ -51,9 +51,9 @@ pub struct GossipCacheBuilder {
     proposer_slashing: Option<Duration>,
     /// Timeout for attester slashings.
     attester_slashing: Option<Duration>,
-    /// Timeout for aggregated sync commitee signatures.
+    /// Timeout for aggregated sync committee signatures.
     signed_contribution_and_proof: Option<Duration>,
-    /// Timeout for sync commitee messages.
+    /// Timeout for sync committee messages.
     sync_committee_message: Option<Duration>,
 }
@@ -101,13 +101,13 @@ impl GossipCacheBuilder {
         self
     }

-    /// Timeout for aggregated sync commitee signatures.
+    /// Timeout for aggregated sync committee signatures.
     pub fn signed_contribution_and_proof_timeout(mut self, timeout: Duration) -> Self {
         self.signed_contribution_and_proof = Some(timeout);
         self
     }

-    /// Timeout for sync commitee messages.
+    /// Timeout for sync committee messages.
     pub fn sync_committee_message_timeout(mut self, timeout: Duration) -> Self {
         self.sync_committee_message = Some(timeout);
         self
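
The two renamed doc comments sit on standard chained-builder setters: each records an optional timeout and hands `self` back so calls can be chained. Here is a minimal, self-contained sketch of that pattern; the reduced field set and the `default()`/`build()` plumbing are assumptions for illustration, not the actual `GossipCacheBuilder` API.

```rust
use std::time::Duration;

#[derive(Default)]
struct GossipCacheBuilder {
    /// Timeout for aggregated sync committee signatures.
    signed_contribution_and_proof: Option<Duration>,
    /// Timeout for sync committee messages.
    sync_committee_message: Option<Duration>,
}

struct GossipCache {
    signed_contribution_and_proof: Option<Duration>,
    sync_committee_message: Option<Duration>,
}

impl GossipCacheBuilder {
    /// Timeout for aggregated sync committee signatures.
    fn signed_contribution_and_proof_timeout(mut self, timeout: Duration) -> Self {
        self.signed_contribution_and_proof = Some(timeout);
        self
    }

    /// Timeout for sync committee messages.
    fn sync_committee_message_timeout(mut self, timeout: Duration) -> Self {
        self.sync_committee_message = Some(timeout);
        self
    }

    /// Assumed finaliser: the real builder covers many more gossip topics.
    fn build(self) -> GossipCache {
        GossipCache {
            signed_contribution_and_proof: self.signed_contribution_and_proof,
            sync_committee_message: self.sync_committee_message,
        }
    }
}

fn main() {
    let cache = GossipCacheBuilder::default()
        .signed_contribution_and_proof_timeout(Duration::from_secs(4))
        .sync_committee_message_timeout(Duration::from_secs(4))
        .build();
    assert_eq!(cache.signed_contribution_and_proof, Some(Duration::from_secs(4)));
    assert_eq!(cache.sync_committee_message, Some(Duration::from_secs(4)));
}
```

Consuming `self` in each setter keeps the chain ergonomic without interior mutability, and leaving every timeout as an `Option` lets unset topics simply have no expiry.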


@@ -362,7 +362,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                 Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg),
                 // process any sync committee service events
-                Some(msg) = self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg),
+                Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg),
                 event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await,
@@ -774,7 +774,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         }
     }

-    fn on_sync_commitee_service_message(&mut self, msg: SubnetServiceMessage) {
+    fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) {
         match msg {
             SubnetServiceMessage::Subscribe(subnet) => {
                 for fork_digest in self.required_gossip_fork_digests() {
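
The first hunk shows the dispatch site: the network service polls each sub-service's message stream in a `tokio::select!` loop and forwards every message to a handler such as `on_sync_committee_service_message`. Below is a minimal sketch of that pattern, using an in-memory stream and a free function in place of the real `NetworkService` method; the crate choices and message shape are assumptions.

```rust
// Assumed Cargo.toml: tokio = { version = "1", features = ["full"] }, futures = "0.3"
use futures::{stream, StreamExt};

#[derive(Debug)]
enum SubnetServiceMessage {
    Subscribe(u64),
    Unsubscribe(u64),
}

// Stand-in for `NetworkService::on_sync_committee_service_message`.
fn on_sync_committee_service_message(msg: SubnetServiceMessage) {
    println!("sync committee service message: {msg:?}");
}

#[tokio::main]
async fn main() {
    // Stand-in for the sync committee service's message stream.
    let mut sync_committee_service = stream::iter(vec![
        SubnetServiceMessage::Subscribe(3),
        SubnetServiceMessage::Unsubscribe(3),
    ]);

    loop {
        tokio::select! {
            // Dispatch each service message to its handler, as in the hunk above.
            Some(msg) = sync_committee_service.next() => {
                on_sync_committee_service_message(msg)
            }
            // All streams have ended; leave the loop.
            else => break,
        }
    }
}
```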


@@ -33,7 +33,7 @@ Lighthouse maintains two permanent branches:
 - [`stable`][stable]: Always points to the latest stable release.
   - This is ideal for most users.
 - [`unstable`][unstable]: Used for development, contains the latest PRs.
-  - Developers should base thier PRs on this branch.
+  - Developers should base their PRs on this branch.

 ## Ethereum consensus client


@@ -73,7 +73,7 @@ The `stability` is:
 The `arch` is:

 * `-amd64` for x86_64, e.g. Intel, AMD
-* `-arm64` for aarch64, e.g. Rasperry Pi 4
+* `-arm64` for aarch64, e.g. Raspberry Pi 4
 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms)

 The `modernity` is:


@@ -14,7 +14,7 @@ The community maintains additional installation methods (currently only one).
 Additionally, there are two extra guides for specific uses:

-- [Rapsberry Pi 4 guide](./pi.md).
+- [Raspberry Pi 4 guide](./pi.md).
 - [Cross-compiling guide for developers](./cross-compiling.md).

 ## Minimum System Requirements


@@ -34,7 +34,7 @@ Remember, if you get stuck you can always reach out on our [Discord][discord].
 >
 > **Please note**: the Lighthouse team does not take any responsibility for losses or damages
-> occured through the use of Lighthouse. We have an experienced internal security team and have
+> occurred through the use of Lighthouse. We have an experienced internal security team and have
 > undergone multiple third-party security-reviews, however the possibility of bugs or malicious
 > interference remains a real and constant threat. Validators should be prepared to lose some rewards
 > due to the actions of other actors on the consensus layer or software bugs. See the


@@ -19,7 +19,7 @@ The additional requirements for developers are:
 ## Using `make`

-Commands to run the test suite are avaiable via the `Makefile` in the
+Commands to run the test suite are available via the `Makefile` in the
 project root for the benefit of CI/CD. We list some of these commands below so
 you can run them locally and avoid CI failures:


@@ -1,6 +1,6 @@
 # Running a Slasher

-Lighthouse includes a slasher for identifying slashable offences comitted by other validators and
+Lighthouse includes a slasher for identifying slashable offences committed by other validators and
 including proof of those offences in blocks.

 Running a slasher is a good way to contribute to the health of the network, and doing so can earn
@@ -69,7 +69,7 @@ The slasher uses MDBX as its backing store, which places a hard limit on the siz
 file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after
 initialization if the limit is reached.

-By default the limit is set to accomodate the default history length and around 300K validators but
+By default the limit is set to accommodate the default history length and around 300K validators but
 you can set it lower if running with a reduced history length. The space required scales
 approximately linearly in validator count and history length, i.e. if you halve either you can halve
 the space required.
@@ -134,7 +134,7 @@ the slot duration.
 ### Chunk Size and Validator Chunk Size

 * Flags: `--slasher-chunk-size EPOCHS`, `--slasher-validator-chunk-size NUM_VALIDATORS`
-* Arguments: number of ecochs, number of validators
+* Arguments: number of epochs, number of validators
 * Defaults: 16, 256

 Adjusting these parameter should only be done in conjunction with reading in detail


@@ -54,7 +54,7 @@ Examples where it is **ineffective** are:
 clients (e.g. Lighthouse and Prysm) running on the same machine, two Lighthouse instances using
 different datadirs, or two clients on completely different machines (e.g. one on a cloud server
 and one running locally). You are responsible for ensuring that your validator keys are never
-running simultanously the slashing protection DB **cannot protect you in this case**.
+running simultaneously the slashing protection DB **cannot protect you in this case**.
 * Importing keys from another client without also importing voting history.
 * If you use `--init-slashing-protection` to recreate a missing slashing protection database.


@@ -1,6 +1,6 @@
 # Importing from the Ethereum Staking Launch pad

-The [Staking Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website
+The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website
 from the Ethereum Foundation which guides users how to use the
 [`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli)
 command-line program to generate consensus validator keys.


@@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet'
 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 95000
 # Mar 11th, 2022, 14:00 UTC
 MIN_GENESIS_TIME: 1647007200
-# Gensis fork
+# Genesis fork
 GENESIS_FORK_VERSION: 0x70000069
 # 300 seconds (5 min)
 GENESIS_DELAY: 300


@@ -394,7 +394,7 @@ async fn invalid_attestation_no_committee_for_index() {
         &spec,
     );

-    // Expecting NoCommitee because we manually set the attestation's index to be invalid
+    // Expecting NoCommittee because we manually set the attestation's index to be invalid
     assert_eq!(
         result,
         Err(BlockProcessingError::AttestationInvalid {
@@ -471,7 +471,7 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() {
         &spec,
     );

-    // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the commitee size.
+    // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the committee size.
     assert_eq!(
         result,
         Err(BlockProcessingError::BeaconStateError(


@@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80
 # Number of beacon_node instances that you intend to run
 BN_COUNT=4

-# Number of valicator clients
+# Number of validator clients
 VC_COUNT=$BN_COUNT

 # Number of seconds to delay to start genesis block.


@@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80
 # Number of beacon_node instances that you intend to run
 BN_COUNT=4

-# Number of valicator clients
+# Number of validator clients
 VC_COUNT=$BN_COUNT

 # Number of seconds to delay to start genesis block.


@@ -440,7 +440,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             context.service_context("sync_committee".into()),
         );

-        // Wait until genesis has occured.
+        // Wait until genesis has occurred.
         //
         // It seems most sensible to move this into the `start_service` function, but I'm caution
         // of making too many changes this close to genesis (<1 week).